756 files changed, 29472 insertions, 14457 deletions
diff --git a/Documentation/devicetree/bindings/net/broadcom-mdio-unimac.txt b/Documentation/devicetree/bindings/net/broadcom-mdio-unimac.txt new file mode 100644 index 000000000000..ab0bb4247d14 --- /dev/null +++ b/Documentation/devicetree/bindings/net/broadcom-mdio-unimac.txt @@ -0,0 +1,39 @@ +* Broadcom UniMAC MDIO bus controller + +Required properties: +- compatible: should be one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2", + "brcm,genet-mdio-v3", "brcm,genet-mdio-v4" or "brcm,unimac-mdio" +- reg: address and length of the register set for the device; the first one is the + base register, and the second one is optional and used for indirect accesses to + MDIO transactions larger than 16 bits +- reg-names: name(s) of the register(s); must be "mdio", optionally followed by "mdio_indir_rw" +- #size-cells: must be 1 +- #address-cells: must be 0 + +Optional properties: +- interrupts: must be one interrupt if it is shared with the Ethernet MAC or + Ethernet switch this MDIO block is integrated into, or must be two, if there + are two separate interrupts: the first one for "mdio done" and the second one + for "mdio error" +- interrupt-names: must be "mdio_done_error" when there is a shared interrupt fed + to this hardware block, or must be "mdio_done" for the first interrupt and + "mdio_error" for the second when there are separate interrupts + +Child nodes of this MDIO bus controller node are standard Ethernet PHY device +nodes as described in Documentation/devicetree/bindings/net/phy.txt + +Example: + +mdio@403c0 { + compatible = "brcm,unimac-mdio"; + reg = <0x403c0 0x8 0x40300 0x18>; + reg-names = "mdio", "mdio_indir_rw"; + #size-cells = <1>; + #address-cells = <0>; + + ... + phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; +}; diff --git a/Documentation/devicetree/bindings/net/broadcom-sf2.txt b/Documentation/devicetree/bindings/net/broadcom-sf2.txt new file mode 100644 index 000000000000..30d487597ecb --- /dev/null +++ b/Documentation/devicetree/bindings/net/broadcom-sf2.txt @@ -0,0 +1,78 @@ +* Broadcom Starfighter 2 integrated switch + +Required properties: + +- compatible: should be "brcm,bcm7445-switch-v4.0" +- reg: addresses and lengths of the register sets for the device; must be 6 + pairs of register addresses and lengths +- interrupts: interrupts for the device; must be two interrupts +- dsa,mii-bus: phandle to the MDIO bus controller, see dsa/dsa.txt +- dsa,ethernet: phandle to the CPU network interface controller, see dsa/dsa.txt +- #size-cells: must be 0 +- #address-cells: must be 2, see dsa/dsa.txt + +Subnodes: + +The integrated switch subnode should be specified according to the binding +described in dsa/dsa.txt.
+ +Optional properties: + +- reg-names: literal names for the device base register addresses, when present + must be: "core", "reg", "intrl2_0", "intrl2_1", "fcb", "acb" + +- interrupt-names: literal names for the device interrupt lines, when present + must be: "switch_0" and "switch_1" + +- brcm,num-gphy: specify the maximum number of integrated gigabit PHYs in the + switch + +- brcm,num-rgmii-ports: specify the maximum number of RGMII interfaces supported + by the switch + +- brcm,fcb-pause-override: boolean property, if present indicates that the switch + supports Failover Control Block pause override capability + +- brcm,acb-packets-inflight: boolean property, if present indicates that the switch + Admission Control Block supports reporting the number of packets in-flight in a + switch queue + +Example: + +switch_top@f0b00000 { + compatible = "simple-bus"; + #size-cells = <1>; + #address-cells = <1>; + ranges = <0 0xf0b00000 0x40804>; + + ethernet_switch@0 { + compatible = "brcm,bcm7445-switch-v4.0"; + #size-cells = <0>; + #address-cells = <2>; + reg = <0x0 0x40000 + 0x40000 0x110 + 0x40340 0x30 + 0x40380 0x30 + 0x40400 0x34 + 0x40600 0x208>; + interrupts = <0 0x18 0 + 0 0x19 0>; + brcm,num-gphy = <1>; + brcm,num-rgmii-ports = <2>; + brcm,fcb-pause-override; + brcm,acb-packets-inflight; + + ... + switch@0 { + reg = <0 0>; + #size-cells = <0>; + #address-cells = <1>; + + port@0 { + label = "gphy"; + reg = <0>; + }; + ... + }; + }; +}; diff --git a/Documentation/devicetree/bindings/net/can/m_can.txt b/Documentation/devicetree/bindings/net/can/m_can.txt new file mode 100644 index 000000000000..9e331777c203 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/m_can.txt @@ -0,0 +1,67 @@ +Bosch MCAN controller Device Tree Bindings +------------------------------------------------- + +Required properties: +- compatible : Should be "bosch,m_can" for M_CAN controllers +- reg : physical base address and size of the M_CAN + registers map and Message RAM +- reg-names : Should be "m_can" and "message_ram" +- interrupts : Should be the interrupt number of M_CAN interrupt + line 0 and line 1; they can be the same if both + lines share one interrupt. +- interrupt-names : Should contain "int0" and "int1" +- clocks : Clocks used by controller, should be host clock + and CAN clock. +- clock-names : Should contain "hclk" and "cclk" +- pinctrl-<n> : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt +- pinctrl-names : Names corresponding to the numbered pinctrl states +- bosch,mram-cfg : Message RAM configuration data. + Multiple M_CAN instances can share the same Message + RAM, and the number of each element (e.g. Rx FIFO or + Tx Buffer) in Message RAM is also configurable, + so this property tells the driver how the shared or + private Message RAM is used by this M_CAN controller. + + The format should be as follows: + <offset sidf_elems xidf_elems rxf0_elems rxf1_elems + rxb_elems txe_elems txb_elems> + The 'offset' is an address offset of the Message RAM + where the following elements start from. This is + usually set to 0x0 if you're using a private Message + RAM. The remaining cells are used to specify how many + elements are used for each FIFO/Buffer.
+ + M_CAN includes the following elements according to the user manual: + 11-bit Filter 0-128 elements / 0-128 words + 29-bit Filter 0-64 elements / 0-128 words + Rx FIFO 0 0-64 elements / 0-1152 words + Rx FIFO 1 0-64 elements / 0-1152 words + Rx Buffers 0-64 elements / 0-1152 words + Tx Event FIFO 0-32 elements / 0-64 words + Tx Buffers 0-32 elements / 0-576 words + + Please refer to 2.4.1 Message RAM Configuration in + the Bosch M_CAN user manual for details. + +Example: +SoC dtsi: +m_can1: can@020e8000 { + compatible = "bosch,m_can"; + reg = <0x020e8000 0x4000>, <0x02298000 0x4000>; + reg-names = "m_can", "message_ram"; + interrupts = <0 114 0x04>, + <0 114 0x04>; + interrupt-names = "int0", "int1"; + clocks = <&clks IMX6SX_CLK_CANFD>, + <&clks IMX6SX_CLK_CANFD>; + clock-names = "hclk", "cclk"; + bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>; + status = "disabled"; +}; + +Board dts: +&m_can1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_m_can1>; + status = "okay"; +}; diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt new file mode 100644 index 000000000000..002d8440bf66 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt @@ -0,0 +1,43 @@ +Renesas R-Car CAN controller Device Tree Bindings +------------------------------------------------- + +Required properties: +- compatible: "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC. + "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC. + "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC. + "renesas,can-r8a7791" if CAN controller is a part of R8A7791 SoC. +- reg: physical base address and size of the R-Car CAN register map. +- interrupts: interrupt specifier for the sole interrupt. +- clocks: phandles and clock specifiers for 3 CAN clock inputs. +- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk". +- pinctrl-0: pin control group to be used for this controller. +- pinctrl-names: must be "default". + +Optional properties: +- renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are: + <0x0> (default) : Peripheral clock (clkp1) + <0x1> : Peripheral clock (clkp2) + <0x3> : Externally input clock + +Example +------- + +SoC common .dtsi file: + + can0: can@e6e80000 { + compatible = "renesas,can-r8a7791"; + reg = <0 0xe6e80000 0 0x1000>; + interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&mstp9_clks R8A7791_CLK_RCAN0>, + <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>; + clock-names = "clkp1", "clkp2", "can_clk"; + status = "disabled"; + }; + +Board specific .dts file: + +&can0 { + pinctrl-0 = <&can0_pins>; + pinctrl-names = "default"; + status = "okay"; +}; diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt index 49f4f7ae3f51..a62c889aafca 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.txt +++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt @@ -39,6 +39,22 @@ Optionnal property: This property is only used when switches are being chained/cascaded together. +- phy-handle : Phandle to a PHY on an external MDIO bus, not the + switch internal one. See + Documentation/devicetree/bindings/net/ethernet.txt + for details. + +- phy-mode : String representing the connection to the designated + PHY node specified by the 'phy-handle' property. See + Documentation/devicetree/bindings/net/ethernet.txt + for details.
+ +Optional subnodes: +- fixed-link : Fixed-link subnode describing a link to a non-MDIO + managed entity. See + Documentation/devicetree/bindings/net/fixed-link.txt + for details. + Example: dsa@0 { @@ -58,6 +74,7 @@ Example: port@0 { reg = <0>; label = "lan1"; + phy-handle = <&phy0>; }; port@1 { diff --git a/Documentation/devicetree/bindings/net/emac_rockchip.txt b/Documentation/devicetree/bindings/net/emac_rockchip.txt new file mode 100644 index 000000000000..8dc1c79fef7f --- /dev/null +++ b/Documentation/devicetree/bindings/net/emac_rockchip.txt @@ -0,0 +1,50 @@ +* ARC EMAC 10/100 Ethernet platform driver for Rockchip Rk3066/RK3188 SoCs + +Required properties: +- compatible: Should be "rockchip,rk3066-emac" or "rockchip,rk3188-emac" + according to the target SoC. +- reg: Address and length of the register set for the device +- interrupts: Should contain the EMAC interrupts +- rockchip,grf: phandle to the syscon grf used to control speed and mode + for the emac. +- phy: see ethernet.txt file in the same directory. +- phy-mode: see ethernet.txt file in the same directory. + +Optional properties: +- phy-supply: phandle to a regulator if the PHY needs one + +Clock handling: +- clocks: Must contain an entry for each entry in clock-names. +- clock-names: Shall be "hclk" for the host clock needed to calculate and set + the polling period of the EMAC and "macref" for the reference clock needed to + transfer data to and from the phy. + +Child nodes of the driver are the individual PHY devices connected to the +MDIO bus. They must have a "reg" property giving the PHY address on the MDIO bus. + +Examples: + +ethernet@10204000 { + compatible = "rockchip,rk3188-emac"; + reg = <0x10204000 0x3c>; + interrupts = <6>; + mac-address = [ 00 11 22 33 44 55 ]; + + clocks = <&cru HCLK_EMAC>, <&cru SCLK_MAC>; + clock-names = "hclk", "macref"; + + pinctrl-names = "default"; + pinctrl-0 = <&emac_xfer>, <&emac_mdio>, <&phy_int>; + + rockchip,grf = <&grf>; + + phy = <&phy0>; + phy-mode = "rmii"; + phy-supply = <&vcc_rmii>; + + #address-cells = <1>; + #size-cells = <0>; + phy0: ethernet-phy@0 { + reg = <0>; + }; +}; diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index 8a2c7b55ec16..0c8775c45798 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt @@ -16,6 +16,12 @@ Optional properties: - phy-handle : phandle to the PHY device connected to this device. - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. Use instead of phy-handle. +- fsl,num-tx-queues : This property is valid for the enet-avb IP, which supports + multiple hardware queues. It specifies the number of tx queues; if absent, the + tx queue number is set to 1. +- fsl,num-rx-queues : This property is valid for the enet-avb IP, which supports + multiple hardware queues. It specifies the number of rx queues; if absent, the + rx queue number is set to 1. Optional subnodes: - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt index 2a60cd3e8d5d..3a9d67951606 100644 --- a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt +++ b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt @@ -12,6 +12,10 @@ Required properties: - altr,sysmgr-syscon : Should be the phandle to the system manager node that encompasses the glue register, the register offset, and the register shift.
+Optional properties: +- altr,emac-splitter: Should be the phandle to the emac splitter soft IP node if + the DWMAC controller is connected to the emac splitter. + Example: gmac0: ethernet@ff700000 { diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index d16f424c5e8d..014e0319a5c4 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -951,7 +951,7 @@ Size modifier is one of ... Mode modifier is one of: - BPF_IMM 0x00 /* classic BPF only, reserved in eBPF */ + BPF_IMM 0x00 /* used for 32-bit mov in classic BPF and 64-bit in eBPF */ BPF_ABS 0x20 BPF_IND 0x40 BPF_MEM 0x60 @@ -995,6 +995,12 @@ BPF_XADD | BPF_DW | BPF_STX: lock xadd *(u64 *)(dst_reg + off16) += src_reg Where size is one of: BPF_B or BPF_H or BPF_W or BPF_DW. Note that 1 and 2 byte atomic increments are not supported. +eBPF has one 16-byte instruction: BPF_LD | BPF_DW | BPF_IMM, which consists +of two consecutive 'struct bpf_insn' 8-byte blocks and is interpreted as a +single instruction that loads a 64-bit immediate value into dst_reg. +Classic BPF has a similar instruction: BPF_LD | BPF_W | BPF_IMM, which loads a +32-bit immediate value into a register. +
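For illustration only (not part of the patch), and assuming the 'struct bpf_insn' layout from include/uapi/linux/bpf.h, the 16-byte load can be assembled as two consecutive 8-byte blocks; the second block is a pseudo instruction with code 0 that carries the upper 32 bits of the immediate:

	#include <linux/bpf.h>

	/* Sketch: encode BPF_LD | BPF_DW | BPF_IMM as two insn blocks. */
	static void emit_ld_imm64(struct bpf_insn *insn, __u8 dst_reg, __u64 imm)
	{
		insn[0] = (struct bpf_insn) {
			.code    = BPF_LD | BPF_DW | BPF_IMM,
			.dst_reg = dst_reg,
			.imm     = (__u32) imm,		/* lower 32 bits */
		};
		insn[1] = (struct bpf_insn) {
			.code    = 0,			/* continuation block */
			.imm     = (__u32) (imm >> 32),	/* upper 32 bits */
		};
	}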
Testing ------- diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 29a93518bf18..1b5581a30d77 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -65,6 +65,12 @@ neigh/default/gc_thresh1 - INTEGER purge entries if there are fewer than this number. Default: 128 +neigh/default/gc_thresh2 - INTEGER + Threshold when garbage collector becomes more aggressive about + purging entries. Entries older than 5 seconds will be cleared + when over this number. + Default: 512 + neigh/default/gc_thresh3 - INTEGER Maximum number of neighbor entries allowed. Increase this when using large numbers of interfaces and when communicating @@ -838,6 +844,11 @@ igmp_max_memberships - INTEGER conf/all/* is special, changes the settings for all interfaces +igmp_qrv - INTEGER + Controls the IGMP query robustness variable (see RFC2236 8.1). + Default: 2 (as specified by RFC2236 8.1) + Minimum: 1 (as specified by RFC6636 4.5) + log_martians - BOOLEAN Log packets with impossible addresses to kernel log. log_martians for the interface will be enabled if at least one of @@ -941,14 +952,9 @@ accept_source_route - BOOLEAN FALSE (host) accept_local - BOOLEAN - Accept packets with local source addresses. In combination - with suitable routing, this can be used to direct packets - between two local interfaces over the wire and have them - accepted properly. - - rp_filter must be set to a non-zero value in order for - accept_local to have an effect. - + Accept packets with local source addresses. In combination with + suitable routing, this can be used to direct packets between two + local interfaces over the wire and have them accepted properly. default FALSE route_localnet - BOOLEAN @@ -1146,6 +1152,11 @@ anycast_src_echo_reply - BOOLEAN FALSE: disabled Default: FALSE +mld_qrv - INTEGER + Controls the MLD query robustness variable (see RFC3810 9.1). + Default: 2 (as specified by RFC3810 9.1) + Minimum: 1 (as specified by RFC6636 4.5) + IPv6 Fragmentation: ip6frag_high_thresh - INTEGER diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 897f942b976b..412f45ca2d73 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -1,102 +1,307 @@ -The existing interfaces for getting network packages time stamped are: + +1. Control Interfaces + +The interfaces for receiving network packet timestamps are: * SO_TIMESTAMP - Generate time stamp for each incoming packet using the (not necessarily - monotonous!) system time. Result is returned via recv_msg() in a - control message as timeval (usec resolution). + Generates a timestamp for each incoming packet in (not necessarily + monotonic) system time. Reports the timestamp via recvmsg() in a + control message as struct timeval (usec resolution). * SO_TIMESTAMPNS - Same time stamping mechanism as SO_TIMESTAMP, but returns result as - timespec (nsec resolution). + Same timestamping mechanism as SO_TIMESTAMP, but reports the + timestamp as struct timespec (nsec resolution). * IP_MULTICAST_LOOP + SO_TIMESTAMP[NS] - Only for multicasts: approximate send time stamp by receiving the looped - packet and using its receive time stamp. + Only for multicast: approximate transmit timestamp obtained by + reading the looped packet receive timestamp. -The following interface complements the existing ones: receive time -stamps can be generated and returned for arbitrary packets and much -closer to the point where the packet is really sent. Time stamps can -be generated in software (as before) or in hardware (if the hardware -has such a feature). +* SO_TIMESTAMPING + Generates timestamps on reception, transmission or both. Supports + multiple timestamp sources, including hardware. Supports generating + timestamps for stream sockets. -SO_TIMESTAMPING: -Instructs the socket layer which kind of information should be collected -and/or reported. The parameter is an integer with some of the following -bits set. Setting other bits is an error and doesn't change the current -state. +1.1 SO_TIMESTAMP: -Four of the bits are requests to the stack to try to generate -timestamps. Any combination of them is valid. +This socket option enables timestamping of datagrams on the reception +path. Because the destination socket, if any, is not known early in +the network stack, the feature has to be enabled for all packets. The +same is true for all early receive timestamp options. -SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamps in hardware -SOF_TIMESTAMPING_TX_SOFTWARE: try to obtain send time stamps in software -SOF_TIMESTAMPING_RX_HARDWARE: try to obtain receive time stamps in hardware -SOF_TIMESTAMPING_RX_SOFTWARE: try to obtain receive time stamps in software +For interface details, see `man 7 socket`. + + +1.2 SO_TIMESTAMPNS: + +This option is identical to SO_TIMESTAMP except for the returned data type. +Its struct timespec allows for higher resolution (ns) timestamps than the +timeval of SO_TIMESTAMP (us). + + +1.3 SO_TIMESTAMPING: + +Supports multiple types of timestamp requests. As a result, this +socket option takes a bitmap of flags, not a boolean. In + + err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) &val, sizeof(val)); + +val is an integer with any of the following bits set. Setting other +bits returns EINVAL and does not change the current state.
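As a minimal sketch (not part of the patch; the individual flags are described in the sections that follow, and the constants come from linux/net_tstamp.h), enabling generation and reporting of software transmit timestamps on an existing socket fd could look like:

	int val = SOF_TIMESTAMPING_TX_SOFTWARE |	/* generate at driver handoff */
		  SOF_TIMESTAMPING_SOFTWARE;		/* report software timestamps */

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)))
		error(1, errno, "setsockopt SO_TIMESTAMPING");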
-The other three bits control which timestamps will be reported in a -generated control message. If none of these bits are set or if none of -the set bits correspond to data that is available, then the control -message will not be generated: -SOF_TIMESTAMPING_SOFTWARE: report systime if available -SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available (deprecated) -SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available +1.3.1 Timestamp Generation -It is worth noting that timestamps may be collected for reasons other -than being requested by a particular socket with -SOF_TIMESTAMPING_[TR]X_(HARD|SOFT)WARE. For example, most drivers that -can generate hardware receive timestamps ignore -SOF_TIMESTAMPING_RX_HARDWARE. It is still a good idea to set that flag -in case future drivers pay attention. +Some bits are requests to the stack to try to generate timestamps. Any +combination of them is valid. Changes to these bits apply to newly +created packets, not to packets already in the stack. As a result, it +is possible to selectively request timestamps for a subset of packets +(e.g., for sampling) by embedding a send() call within two setsockopt +calls, one to enable timestamp generation and one to disable it (see +the sketch after this list). +Timestamps may also be generated for reasons other than being +requested by a particular socket, such as when receive timestamping is +enabled system wide, as explained earlier. -If timestamps are reported, they will appear in a control message with -cmsg_level==SOL_SOCKET, cmsg_type==SO_TIMESTAMPING, and a payload like -this: +SOF_TIMESTAMPING_RX_HARDWARE: + Request rx timestamps generated by the network adapter. + +SOF_TIMESTAMPING_RX_SOFTWARE: + Request rx timestamps when data enters the kernel. These timestamps + are generated just after a device driver hands a packet to the + kernel receive stack. + +SOF_TIMESTAMPING_TX_HARDWARE: + Request tx timestamps generated by the network adapter. + +SOF_TIMESTAMPING_TX_SOFTWARE: + Request tx timestamps when data leaves the kernel. These timestamps + are generated in the device driver as close as possible to, but + always prior to, passing the packet to the network interface. Hence, + they require driver support and may not be available for all devices. + +SOF_TIMESTAMPING_TX_SCHED: + Request tx timestamps prior to entering the packet scheduler. Kernel + transmit latency is, if long, often dominated by queuing delay. The + difference between this timestamp and one taken at + SOF_TIMESTAMPING_TX_SOFTWARE will expose this latency independent + of protocol processing. The latency incurred in protocol + processing, if any, can be computed by subtracting a userspace + timestamp taken immediately before send() from this timestamp. On + machines with virtual devices where a transmitted packet travels + through multiple devices and, hence, multiple packet schedulers, + a timestamp is generated at each layer. This allows for fine + grained measurement of queuing delay. + +SOF_TIMESTAMPING_TX_ACK: + Request tx timestamps when all data in the send buffer has been + acknowledged. This only makes sense for reliable protocols. It is + currently only implemented for TCP. For that protocol, it may + over-report the measured latency, because the timestamp is generated + when all data up to and including the buffer at send() has been + acknowledged: the cumulative acknowledgment. The mechanism ignores + SACK and FACK.
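The sampling technique mentioned in 1.3.1 can be sketched as follows (illustrative, untested; fd, buf and len are assumed to exist). Generation flags are toggled around a single send(), while SOF_TIMESTAMPING_SOFTWARE stays set throughout, because reporting changes, described next in 1.3.2, take effect immediately:

	int on  = SOF_TIMESTAMPING_TX_SCHED | SOF_TIMESTAMPING_TX_SOFTWARE |
		  SOF_TIMESTAMPING_SOFTWARE;
	int off = SOF_TIMESTAMPING_SOFTWARE;	/* keep reporting enabled */

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &on, sizeof(on));
	send(fd, buf, len, 0);		/* only this packet requests generation */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &off, sizeof(off));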
+ + +1.3.2 Timestamp Reporting + +The other three bits control which timestamps will be reported in a +generated control message. Changes to the bits take immediate +effect at the timestamp reporting locations in the stack. Timestamps +are only reported for packets that also have the relevant timestamp +generation request set. + +SOF_TIMESTAMPING_SOFTWARE: + Report any software timestamps when available. + +SOF_TIMESTAMPING_SYS_HARDWARE: + This option is deprecated and ignored. + +SOF_TIMESTAMPING_RAW_HARDWARE: + Report hardware timestamps as generated by + SOF_TIMESTAMPING_TX_HARDWARE when available. + + +1.3.3 Timestamp Options + +The interface supports one option: + +SOF_TIMESTAMPING_OPT_ID: + + Generate a unique identifier along with each packet. A process can + have multiple concurrent timestamping requests outstanding. Packets + can be reordered in the transmit path, for instance in the packet + scheduler. In that case timestamps will be queued onto the error + queue out of order from the original send() calls. This option + embeds a counter that is incremented at send() time, to order + timestamps within a flow. + + This option is implemented only for transmit timestamps. There, the + timestamp is always looped along with a struct sock_extended_err. + The option modifies field ee_data to pass an id that is unique + among all possibly concurrently outstanding timestamp requests for + that socket. In practice, it is a monotonically increasing u32 + (that wraps). + + In datagram sockets, the counter increments on each send call. In + stream sockets, it increments with every byte. + + +1.4 Bytestream Timestamps + +The SO_TIMESTAMPING interface supports timestamping of bytes in a +bytestream. Each request is interpreted as a request for when the +entire contents of the buffer have passed a timestamping point. That +is, for streams, option SOF_TIMESTAMPING_TX_SOFTWARE will record +when all bytes have reached the device driver, regardless of how +many packets the data has been converted into. + +In general, bytestreams have no natural delimiters and therefore +correlating a timestamp with data is non-trivial. A range of bytes +may be split across segments, and segments may be merged (possibly +coalescing sections of previously segmented buffers associated with +independent send() calls). Segments can be reordered and the same +byte range can coexist in multiple segments for protocols that +implement retransmissions. + +It is essential that all timestamps implement the same semantics, +regardless of these possible transformations, as otherwise they are +incomparable. Handling "rare" corner cases differently from the +simple case (a 1:1 mapping from buffer to skb) is insufficient +because performance debugging often needs to focus on such outliers. + +In practice, timestamps can be correlated with segments of a +bytestream consistently, if both semantics of the timestamp and the +timing of measurement are chosen correctly. This challenge is no +different from deciding on a strategy for IP fragmentation. There, the +definition is that only the first fragment is timestamped. For +bytestreams, we chose that a timestamp is generated only when all +bytes have passed a point. SOF_TIMESTAMPING_TX_ACK as defined is easy to +implement and reason about. An implementation that has to take into +account SACK would be more complex due to possible transmission holes +and out of order arrival. + +On the host, TCP can also break the simple 1:1 mapping from buffer to +skbuff as a result of Nagle, cork, autocork, segmentation and GSO.
The +implementation ensures correctness in all cases by tracking the +individual last byte passed to send(), even if it is no longer the +last byte after an skbuff extend or merge operation. It stores the +relevant sequence number in skb_shinfo(skb)->tskey. Because an skbuff +has only one such field, only one timestamp can be generated. + +In rare cases, a timestamp request can be missed if two requests are +collapsed onto the same skb. A process can detect this situation by +enabling SOF_TIMESTAMPING_OPT_ID and comparing the byte offset at +send time with the value returned for each timestamp. It can prevent +the situation by always flushing the TCP stack in between requests, +for instance by enabling TCP_NODELAY and disabling TCP_CORK and +autocork. + +These precautions ensure that the timestamp is generated only when all +bytes have passed a timestamp point, assuming that the network stack +itself does not reorder the segments. The stack indeed tries to avoid +reordering. The one exception is under administrator control: it is +possible to construct a packet scheduler configuration that delays +segments from the same stream differently. Such a setup would be +unusual. + + +2. Data Interfaces + +Timestamps are read using the ancillary data feature of recvmsg(). +See `man 3 cmsg` for details of this interface. The socket manual +page (`man 7 socket`) describes how timestamps generated with +SO_TIMESTAMP and SO_TIMESTAMPNS can be retrieved. + + +2.1 SCM_TIMESTAMPING records + +These timestamps are returned in a control message with cmsg_level +SOL_SOCKET, cmsg_type SCM_TIMESTAMPING, and payload of type struct scm_timestamping { - struct timespec systime; - struct timespec hwtimetrans; - struct timespec hwtimeraw; + struct timespec ts[3]; }; -recvmsg() can be used to get this control message for regular incoming -packets. For send time stamps the outgoing packet is looped back to -the socket's error queue with the send time stamp(s) attached. It can -be received with recvmsg(flags=MSG_ERRQUEUE). The call returns the -original outgoing packet data including all headers preprended down to -and including the link layer, the scm_timestamping control message and -a sock_extended_err control message with ee_errno==ENOMSG and -ee_origin==SO_EE_ORIGIN_TIMESTAMPING. A socket with such a pending -bounced packet is ready for reading as far as select() is concerned. -If the outgoing packet has to be fragmented, then only the first -fragment is time stamped and returned to the sending socket. - -All three values correspond to the same event in time, but were -generated in different ways. Each of these values may be empty (= all -zero), in which case no such value was available. If the application -is not interested in some of these values, they can be left blank to -avoid the potential overhead of calculating them. - -systime is the value of the system time at that moment. This -corresponds to the value also returned via SO_TIMESTAMP[NS]. If the -time stamp was generated by hardware, then this field is -empty. Otherwise it is filled in if SOF_TIMESTAMPING_SOFTWARE is -set. - -hwtimeraw is the original hardware time stamp. Filled in if -SOF_TIMESTAMPING_RAW_HARDWARE is set. No assumptions about its -relation to system time should be made. - -hwtimetrans is always zero. This field is deprecated. It used to hold -hw timestamps converted to system time.
Instead, expose the hardware -clock device on the NIC directly as a HW PTP clock source, to allow -time conversion in userspace and optionally synchronize system time -with a userspace PTP stack such as linuxptp. For the PTP clock API, -see Documentation/ptp/ptp.txt. - - -SIOCSHWTSTAMP, SIOCGHWTSTAMP: +The structure can return up to three timestamps. This is a legacy +feature. Only one field is non-zero at any time. Most timestamps +are passed in ts[0]. Hardware timestamps are passed in ts[2]. + +ts[1] used to hold hardware timestamps converted to system time. +Instead, expose the hardware clock device on the NIC directly as +a HW PTP clock source, to allow time conversion in userspace and +optionally synchronize system time with a userspace PTP stack such +as linuxptp. For the PTP clock API, see Documentation/ptp/ptp.txt. + +2.1.1 Transmit timestamps with MSG_ERRQUEUE + +For transmit timestamps the outgoing packet is looped back to the +socket's error queue with the send timestamp(s) attached. A process +receives the timestamps by calling recvmsg() with flag MSG_ERRQUEUE +set and with a msg_control buffer sufficiently large to receive the +relevant metadata structures. The recvmsg call returns the original +outgoing data packet with two ancillary messages attached. + +A message of cmsg_level SOL_IP(V6) and cmsg_type IP(V6)_RECVERR +embeds a struct sock_extended_err. This defines the error type. For +timestamps, the ee_errno field is ENOMSG. The other ancillary message +will have cmsg_level SOL_SOCKET and cmsg_type SCM_TIMESTAMPING. This +embeds the struct scm_timestamping. + + +2.1.1.2 Timestamp types + +The semantics of the three struct timespec are defined by field +ee_info in the extended error structure. It contains a value of +type SCM_TSTAMP_* to define the actual timestamp passed in +scm_timestamping. + +The SCM_TSTAMP_* types are 1:1 matches to the SOF_TIMESTAMPING_* +control fields discussed previously, with one exception. For legacy +reasons, SCM_TSTAMP_SND is equal to zero and can be set for both +SOF_TIMESTAMPING_TX_HARDWARE and SOF_TIMESTAMPING_TX_SOFTWARE. It +is the first if ts[2] is non-zero, the second otherwise, in which +case the timestamp is stored in ts[0]. + + +2.1.1.3 Fragmentation + +Fragmentation of outgoing datagrams is rare, but is possible, e.g., by +explicitly disabling PMTU discovery. If an outgoing packet is fragmented, +then only the first fragment is timestamped and returned to the sending +socket. + + +2.1.1.4 Packet Payload + +The calling application is often not interested in receiving the whole +packet payload that it passed to the stack originally: the socket +error queue mechanism is just a method to piggyback the timestamp on. +In this case, the application can choose to read datagrams with a +smaller buffer, possibly even of length 0. The payload is truncated +accordingly. Until the process calls recvmsg() on the error queue, +however, the full packet is queued, taking up budget from SO_RCVBUF. + + +2.1.1.5 Blocking Read + +Reading from the error queue is always a non-blocking operation. To +block waiting on a timestamp, use poll or select. poll() will return +POLLERR in pollfd.revents if any data is ready on the error queue; +there is no need to set this flag in pollfd.events, and it is ignored +if passed. See also `man 2 poll`.
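Putting section 2.1.1 together, a condensed, untested sketch of receiving one transmit timestamp follows; the full version is the txtimestamp.c example added below, and use_timestamp() is a hypothetical consumer:

	char data[256], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = ctrl, .msg_controllen = sizeof(ctrl) };
	struct scm_timestamping *tss = NULL;
	struct sock_extended_err *serr = NULL;
	struct cmsghdr *cm;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) == -1)
		return;				/* e.g. EAGAIN: nothing queued */

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_TIMESTAMPING)
			tss = (void *) CMSG_DATA(cm);
		else if ((cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) ||
			 (cm->cmsg_level == SOL_IPV6 && cm->cmsg_type == IPV6_RECVERR))
			serr = (void *) CMSG_DATA(cm);
	}

	if (tss && serr && serr->ee_errno == ENOMSG &&
	    serr->ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
		/* ee_info: SCM_TSTAMP_* type; ee_data: OPT_ID counter */
		use_timestamp(&tss->ts[0], serr->ee_info, serr->ee_data);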
+ + +2.1.2 Receive timestamps + +On reception, there is no reason to read from the socket error queue. +The SCM_TIMESTAMPING ancillary data is sent along with the packet data +on a normal recvmsg(). Since this is not a socket error, it is not +accompanied by a message SOL_IP(V6)/IP(V6)_RECVERR. In this case, +the meaning of the three fields in struct scm_timestamping is +implicitly defined. ts[0] holds a software timestamp if set, ts[1] +is again deprecated and ts[2] holds a hardware timestamp if set. + + +3. Hardware Timestamping configuration: SIOCSHWTSTAMP and SIOCGHWTSTAMP Hardware time stamping must also be initialized for each device driver that is expected to do hardware time stamping. The parameter is defined in @@ -167,8 +372,7 @@ enum { */ };
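For the user-side half of this configuration, a sketch modeled on the hwtstamp_config tool in this directory (the interface name and filter choice are illustrative; fd is any open socket):

	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *) &cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		error(1, errno, "SIOCSHWTSTAMP");	/* drivers reject unsupported modes */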
- -DEVICE IMPLEMENTATION +3.1 Hardware Timestamping Implementation: Device Drivers A driver which supports hardware time stamping must support the SIOCSHWTSTAMP ioctl and update the supplied struct hwtstamp_config with diff --git a/Documentation/networking/timestamping/Makefile b/Documentation/networking/timestamping/Makefile index d934afc8306a..95e239c70076 --- a/Documentation/networking/timestamping/Makefile +++ b/Documentation/networking/timestamping/Makefile @@ -1,14 +1,20 @@ +# To compile, from the source root +# +# make headers_install +# make M=Documentation/networking/timestamping + # kbuild trick to avoid linker error. Can be omitted if a module is built. obj- := dummy.o # List of programs to build -hostprogs-y := timestamping hwtstamp_config +hostprogs-y := timestamping txtimestamp hwtstamp_config # Tell kbuild to always build the programs always := $(hostprogs-y) HOSTCFLAGS_timestamping.o += -I$(objtree)/usr/include +HOSTCFLAGS_txtimestamp.o += -I$(objtree)/usr/include HOSTCFLAGS_hwtstamp_config.o += -I$(objtree)/usr/include clean: - rm -f timestamping hwtstamp_config + rm -f timestamping txtimestamp hwtstamp_config diff --git a/Documentation/networking/timestamping/txtimestamp.c b/Documentation/networking/timestamping/txtimestamp.c new file mode 100644 index 000000000000..b32fc2a07734 --- /dev/null +++ b/Documentation/networking/timestamping/txtimestamp.c @@ -0,0 +1,469 @@ +/* + * Copyright 2014 Google Inc. + * Author: willemb@google.com (Willem de Bruijn) + * + * Test software tx timestamping, including + * + * - SCHED, SND and ACK timestamps + * - RAW, UDP and TCP + * - IPv4 and IPv6 + * - various packet sizes (to test GSO and TSO) + * + * Consult the command line arguments for help on running + * the various testcases. + * + * This test requires a dummy TCP server. + * A simple `nc6 [-u] -l -p $DESTPORT` will do + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ + +#include <arpa/inet.h> +#include <asm/types.h> +#include <error.h> +#include <errno.h> +#include <linux/errqueue.h> +#include <linux/if_ether.h> +#include <linux/net_tstamp.h> +#include <netdb.h> +#include <net/if.h> +#include <netinet/in.h> +#include <netinet/ip.h> +#include <netinet/udp.h> +#include <netinet/tcp.h> +#include <netpacket/packet.h> +#include <poll.h> +#include <stdarg.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/select.h> +#include <sys/socket.h> +#include <sys/time.h> +#include <sys/types.h> +#include <time.h> +#include <unistd.h> + +/* command line parameters */ +static int cfg_proto = SOCK_STREAM; +static int cfg_ipproto = IPPROTO_TCP; +static int cfg_num_pkts = 4; +static int do_ipv4 = 1; +static int do_ipv6 = 1; +static int cfg_payload_len = 10; +static uint16_t dest_port = 9000; + +static struct sockaddr_in daddr; +static struct sockaddr_in6 daddr6; +static struct timespec ts_prev; + +static void __print_timestamp(const char *name, struct timespec *cur, + uint32_t key, int payload_len) +{ + if (!(cur->tv_sec | cur->tv_nsec)) + return; + + fprintf(stderr, " %s: %lu s %lu us (seq=%u, len=%u)", + name, cur->tv_sec, cur->tv_nsec / 1000, + key, payload_len); + + if ((ts_prev.tv_sec | ts_prev.tv_nsec)) { + int64_t cur_us, prev_us; /* both in microseconds */ + + cur_us = (long) cur->tv_sec * 1000 * 1000; + cur_us += cur->tv_nsec / 1000; + + prev_us = (long) ts_prev.tv_sec * 1000 * 1000; + prev_us += ts_prev.tv_nsec / 1000; + + fprintf(stderr, " (%+ld us)", cur_us - prev_us); + } + + ts_prev = *cur; + fprintf(stderr, "\n"); +} + +static void print_timestamp_usr(void) +{ + struct timespec ts; + struct timeval tv; /* avoid dependency on -lrt */ + + gettimeofday(&tv, NULL); + ts.tv_sec = tv.tv_sec; + ts.tv_nsec = tv.tv_usec * 1000; + + __print_timestamp(" USR", &ts, 0, 0); +} + +static void print_timestamp(struct scm_timestamping *tss, int tstype, + int tskey, int payload_len) +{ + const char *tsname; + + switch (tstype) { + case SCM_TSTAMP_SCHED: + tsname = " ENQ"; + break; + case SCM_TSTAMP_SND: + tsname = " SND"; + break; + case SCM_TSTAMP_ACK: + tsname = " ACK"; + break; + default: + error(1, 0, "unknown timestamp type: %u", + tstype); + } + __print_timestamp(tsname, &tss->ts[0], tskey, payload_len); +} + +static void __poll(int fd) +{ + struct pollfd pollfd; + int ret; + + memset(&pollfd, 0, sizeof(pollfd)); + pollfd.fd = fd; + ret = poll(&pollfd, 1, 100); + if (ret != 1) + error(1, errno, "poll"); +} + +static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len) +{ + struct sock_extended_err *serr = NULL; + struct scm_timestamping *tss = NULL; + struct cmsghdr *cm; + + for (cm = CMSG_FIRSTHDR(msg); + cm && cm->cmsg_len; + cm = CMSG_NXTHDR(msg, cm)) { + if (cm->cmsg_level == SOL_SOCKET && + cm->cmsg_type == SCM_TIMESTAMPING) { + tss = (void *) CMSG_DATA(cm); + } else if ((cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_RECVERR) || + (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_RECVERR)) { + + serr = (void *) CMSG_DATA(cm); + if (serr->ee_errno != ENOMSG || + serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) { + fprintf(stderr, "unknown ip error %d %d\n", + serr->ee_errno, + serr->ee_origin); + serr = NULL; + } + } else + fprintf(stderr, "unknown cmsg %d,%d\n", + cm->cmsg_level, cm->cmsg_type); + } + + if (serr && tss) + print_timestamp(tss, serr->ee_info, serr->ee_data, payload_len); +} + +static int recv_errmsg(int fd) +{ + static char ctrl[1024 /* overprovision*/]; + static struct msghdr msg; +
struct iovec entry; + static char *data; + int ret = 0; + + data = malloc(cfg_payload_len); + if (!data) + error(1, 0, "malloc"); + + memset(&msg, 0, sizeof(msg)); + memset(&entry, 0, sizeof(entry)); + memset(ctrl, 0, sizeof(ctrl)); + + entry.iov_base = data; + entry.iov_len = cfg_payload_len; + msg.msg_iov = &entry; + msg.msg_iovlen = 1; + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_control = ctrl; + msg.msg_controllen = sizeof(ctrl); + + ret = recvmsg(fd, &msg, MSG_ERRQUEUE); + if (ret == -1 && errno != EAGAIN) + error(1, errno, "recvmsg"); + + __recv_errmsg_cmsg(&msg, ret); + + free(data); + return ret == -1; +} + +static void do_test(int family, unsigned int opt) +{ + char *buf; + int fd, i, val, total_len; + + if (family == PF_INET6 && cfg_proto != SOCK_STREAM) { + /* due to lack of checksum generation code */ + fprintf(stderr, "test: skipping datagram over IPv6\n"); + return; + } + + total_len = cfg_payload_len; + if (cfg_proto == SOCK_RAW) { + total_len += sizeof(struct udphdr); + if (cfg_ipproto == IPPROTO_RAW) + total_len += sizeof(struct iphdr); + } + + buf = malloc(total_len); + if (!buf) + error(1, 0, "malloc"); + + fd = socket(family, cfg_proto, cfg_ipproto); + if (fd < 0) + error(1, errno, "socket"); + + if (cfg_proto == SOCK_STREAM) { + val = 1; + if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, + (char *) &val, sizeof(val))) + error(1, errno, "setsockopt no nagle"); + + if (family == PF_INET) { + if (connect(fd, (void *) &daddr, sizeof(daddr))) + error(1, errno, "connect ipv4"); + } else { + if (connect(fd, (void *) &daddr6, sizeof(daddr6))) + error(1, errno, "connect ipv6"); + } + } + + opt |= SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_OPT_ID; + if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, + (char *) &opt, sizeof(opt))) + error(1, errno, "setsockopt timestamping"); + + for (i = 0; i < cfg_num_pkts; i++) { + memset(&ts_prev, 0, sizeof(ts_prev)); + memset(buf, 'a' + i, total_len); + buf[total_len - 2] = '\n'; + buf[total_len - 1] = '\0'; + + if (cfg_proto == SOCK_RAW) { + struct udphdr *udph; + int off = 0; + + if (cfg_ipproto == IPPROTO_RAW) { + struct iphdr *iph = (void *) buf; + + memset(iph, 0, sizeof(*iph)); + iph->ihl = 5; + iph->version = 4; + iph->ttl = 2; + iph->daddr = daddr.sin_addr.s_addr; + iph->protocol = IPPROTO_UDP; + /* kernel writes saddr, csum, len */ + + off = sizeof(*iph); + } + + udph = (void *) buf + off; + udph->source = ntohs(9000); /* random spoof */ + udph->dest = ntohs(dest_port); + udph->len = ntohs(sizeof(*udph) + cfg_payload_len); + udph->check = 0; /* not allowed for IPv6 */ + } + + print_timestamp_usr(); + if (cfg_proto != SOCK_STREAM) { + if (family == PF_INET) + val = sendto(fd, buf, total_len, 0, (void *) &daddr, sizeof(daddr)); + else + val = sendto(fd, buf, total_len, 0, (void *) &daddr6, sizeof(daddr6)); + } else { + val = send(fd, buf, cfg_payload_len, 0); + } + if (val != total_len) + error(1, errno, "send"); + + /* wait for all errors to be queued, else ACKs arrive OOO */ + usleep(50 * 1000); + + __poll(fd); + + while (!recv_errmsg(fd)) {} + } + + if (close(fd)) + error(1, errno, "close"); + + free(buf); + usleep(400 * 1000); +} + +static void __attribute__((noreturn)) usage(const char *filepath) +{ + fprintf(stderr, "\nUsage: %s [options] hostname\n" + "\nwhere options are:\n" + " -4: only IPv4\n" + " -6: only IPv6\n" + " -h: show this message\n" + " -l N: send N bytes at a time\n" + " -r: use raw\n" + " -R: use raw (IP_HDRINCL)\n" + " -p N: connect to port N\n" + " -u: use udp\n", + filepath); + exit(1); +} + +static void
parse_opt(int argc, char **argv) +{ + int proto_count = 0; + int c; + + while ((c = getopt(argc, argv, "46hl:p:rRu")) != -1) { + switch (c) { + case '4': + do_ipv6 = 0; + break; + case '6': + do_ipv4 = 0; + break; + case 'r': + proto_count++; + cfg_proto = SOCK_RAW; + cfg_ipproto = IPPROTO_UDP; + break; + case 'R': + proto_count++; + cfg_proto = SOCK_RAW; + cfg_ipproto = IPPROTO_RAW; + break; + case 'u': + proto_count++; + cfg_proto = SOCK_DGRAM; + cfg_ipproto = IPPROTO_UDP; + break; + case 'l': + cfg_payload_len = strtoul(optarg, NULL, 10); + break; + case 'p': + dest_port = strtoul(optarg, NULL, 10); + break; + case 'h': + default: + usage(argv[0]); + } + } + + if (!cfg_payload_len) + error(1, 0, "payload may not be zero"); + if (cfg_proto != SOCK_STREAM && cfg_payload_len > 1472) + error(1, 0, "udp packet might exceed expected MTU"); + if (!do_ipv4 && !do_ipv6) + error(1, 0, "pass -4 or -6, not both"); + if (proto_count > 1) + error(1, 0, "pass -r, -R or -u, not multiple"); + + if (optind != argc - 1) + error(1, 0, "missing required hostname argument"); +} + +static void resolve_hostname(const char *hostname) +{ + struct addrinfo *addrs, *cur; + int have_ipv4 = 0, have_ipv6 = 0; + + if (getaddrinfo(hostname, NULL, NULL, &addrs)) + error(1, errno, "getaddrinfo"); + + cur = addrs; + while (cur && !have_ipv4 && !have_ipv6) { + if (!have_ipv4 && cur->ai_family == AF_INET) { + memcpy(&daddr, cur->ai_addr, sizeof(daddr)); + daddr.sin_port = htons(dest_port); + have_ipv4 = 1; + } + else if (!have_ipv6 && cur->ai_family == AF_INET6) { + memcpy(&daddr6, cur->ai_addr, sizeof(daddr6)); + daddr6.sin6_port = htons(dest_port); + have_ipv6 = 1; + } + cur = cur->ai_next; + } + if (addrs) + freeaddrinfo(addrs); + + do_ipv4 &= have_ipv4; + do_ipv6 &= have_ipv6; +} + +static void do_main(int family) +{ + fprintf(stderr, "family: %s\n", + family == PF_INET ? "INET" : "INET6"); + + fprintf(stderr, "test SND\n"); + do_test(family, SOF_TIMESTAMPING_TX_SOFTWARE); + + fprintf(stderr, "test ENQ\n"); + do_test(family, SOF_TIMESTAMPING_TX_SCHED); + + fprintf(stderr, "test ENQ + SND\n"); + do_test(family, SOF_TIMESTAMPING_TX_SCHED | + SOF_TIMESTAMPING_TX_SOFTWARE); + + if (cfg_proto == SOCK_STREAM) { + fprintf(stderr, "\ntest ACK\n"); + do_test(family, SOF_TIMESTAMPING_TX_ACK); + + fprintf(stderr, "\ntest SND + ACK\n"); + do_test(family, SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_ACK); + + fprintf(stderr, "\ntest ENQ + SND + ACK\n"); + do_test(family, SOF_TIMESTAMPING_TX_SCHED | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_ACK); + } +} + +const char *sock_names[] = { NULL, "TCP", "UDP", "RAW" }; + +int main(int argc, char **argv) +{ + if (argc == 1) + usage(argv[0]); + + parse_opt(argc, argv); + resolve_hostname(argv[argc - 1]); + + fprintf(stderr, "protocol: %s\n", sock_names[cfg_proto]); + fprintf(stderr, "payload: %u\n", cfg_payload_len); + fprintf(stderr, "server port: %u\n", dest_port); + fprintf(stderr, "\n"); + + if (do_ipv4) + do_main(PF_INET); + if (do_ipv6) + do_main(PF_INET6); + + return 0; +} diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 9a0319a82470..04892b821157 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -241,6 +241,9 @@ address of the router (or Connected) for internal networks. 6. TIPC ------------------------------------------------------- +tipc_rmem +---------- + The TIPC protocol now has a tunable for the receive memory, similar to the tcp_rmem - i.e.
a vector of 3 INTEGERs: (min, default, max) @@ -252,3 +255,16 @@ The max value is set to CONN_OVERLOAD_LIMIT, and the default and min values are scaled (shifted) versions of that same value. Note that the min value is not at this point in time used in any meaningful way, but the triplet is preserved in order to be consistent with things like tcp_rmem. + +named_timeout +-------------- + +TIPC name table updates are distributed asynchronously in a cluster, without +any form of transaction handling. This means that different race scenarios are +possible. One such scenario is that a name withdrawal sent out by one node and +received by another node may arrive after a second, overlapping name publication +has already been accepted from a third node, even though the conflicting updates +may originally have been issued in the correct sequential order. +If named_timeout is nonzero, failed topology updates will be placed on a defer +queue until another event arrives that clears the error, or until the timeout +expires. Value is in milliseconds. diff --git a/MAINTAINERS b/MAINTAINERS index 670b3dcce2de..f8db3c3acc67 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -152,8 +152,8 @@ F: drivers/scsi/53c700* 6LOWPAN GENERIC (BTLE/IEEE 802.15.4) M: Alexander Aring <alex.aring@gmail.com> -L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-bluetooth@vger.kernel.org +L: linux-wpan@vger.kernel.org S: Maintained F: net/6lowpan/ F: include/net/6lowpan.h @@ -4597,13 +4597,14 @@ F: drivers/idle/i7300_idle.c IEEE 802.15.4 SUBSYSTEM M: Alexander Aring <alex.aring@gmail.com> -L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) -W: http://apps.sourceforge.net/trac/linux-zigbee -T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git +L: linux-wpan@vger.kernel.org +W: https://github.com/linux-wpan +T: git git://github.com/linux-wpan/linux-wpan-next.git S: Maintained F: net/ieee802154/ F: net/mac802154/ F: drivers/net/ieee802154/ +F: Documentation/networking/ieee802154.txt IGUANAWORKS USB IR TRANSCEIVER M: Sean Young <sean@mess.org> @@ -6373,7 +6374,7 @@ M: Lauro Ramos Venancio <lauro.venancio@openbossa.org> M: Aloisio Almeida Jr <aloisio.almeida@openbossa.org> M: Samuel Ortiz <sameo@linux.intel.com> L: linux-wireless@vger.kernel.org -L: linux-nfc@lists.01.org (moderated for non-subscribers) +L: linux-nfc@lists.01.org (subscribers-only) S: Supported F: net/nfc/ F: include/net/nfc/ @@ -7379,15 +7380,15 @@ F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER M: Shahed Shaikh <shahed.shaikh@qlogic.com> -M: Dept-HSGLinuxNICDev@qlogic.com +M: Dept-GELinuxNICDev@qlogic.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/qlogic/qlcnic/ QLOGIC QLGE 10Gb ETHERNET DRIVER -M: Shahed Shaikh <shahed.shaikh@qlogic.com> -M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com> -M: Ron Mercer <ron.mercer@qlogic.com> +M: Harish Patil <harish.patil@qlogic.com> +M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com> +M: Dept-GELinuxNICDev@qlogic.com M: linux-driver@qlogic.com L: netdev@vger.kernel.org S: Supported diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index f4b9da65bc0f..0a03260c1707 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -776,6 +776,8 @@ <&clks IMX6SX_CLK_ENET_PTP>; clock-names = "ipg", "ahb", "ptp", "enet_clk_ref", "enet_out"; + fsl,num-tx-queues = <3>; + fsl,num-rx-queues = <3>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts
b/arch/arm/boot/dts/rk3188-radxarock.dts index 5e4e3c238b2d..d63d685ea6cc 100644 --- a/arch/arm/boot/dts/rk3188-radxarock.dts +++ b/arch/arm/boot/dts/rk3188-radxarock.dts @@ -76,6 +76,22 @@ }; }; +&emac { + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&emac_xfer>, <&emac_mdio>, <&phy_int>; + + phy = <&phy0>; + phy-supply = <&vcc_rmii>; + + phy0: ethernet-phy@0 { + reg = <0>; + interrupt-parent = <&gpio3>; + interrupts = <26 IRQ_TYPE_LEVEL_LOW>; + }; +}; + &i2c1 { status = "okay"; clock-frequency = <400000>; @@ -201,6 +217,12 @@ }; }; + lan8720a { + phy_int: phy-int { + rockchip,pins = <RK_GPIO3 26 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + ir-receiver { ir_recv_pin: ir-recv-pin { rockchip,pins = <RK_GPIO0 10 RK_FUNC_GPIO &pcfg_pull_none>; diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index ee801a9c6b74..7f25bc51f2ee 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi @@ -147,6 +147,24 @@ bias-disable; }; + emac { + emac_xfer: emac-xfer { + rockchip,pins = <RK_GPIO3 16 RK_FUNC_2 &pcfg_pull_none>, /* tx_en */ + <RK_GPIO3 17 RK_FUNC_2 &pcfg_pull_none>, /* txd1 */ + <RK_GPIO3 18 RK_FUNC_2 &pcfg_pull_none>, /* txd0 */ + <RK_GPIO3 19 RK_FUNC_2 &pcfg_pull_none>, /* rxd0 */ + <RK_GPIO3 20 RK_FUNC_2 &pcfg_pull_none>, /* rxd1 */ + <RK_GPIO3 21 RK_FUNC_2 &pcfg_pull_none>, /* mac_clk */ + <RK_GPIO3 22 RK_FUNC_2 &pcfg_pull_none>, /* rx_err */ + <RK_GPIO3 23 RK_FUNC_2 &pcfg_pull_none>; /* crs_dvalid */ + }; + + emac_mdio: emac-mdio { + rockchip,pins = <RK_GPIO3 24 RK_FUNC_2 &pcfg_pull_none>, + <RK_GPIO3 25 RK_FUNC_2 &pcfg_pull_none>; + }; + }; + i2c0 { i2c0_xfer: i2c0-xfer { rockchip,pins = <RK_GPIO1 24 RK_FUNC_1 &pcfg_pull_none>, @@ -323,6 +341,10 @@ }; }; +&emac { + compatible = "rockchip,rk3188-emac"; +}; + &global_timer { interrupts = <GIC_PPI 11 0xf04>; }; diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi index 8caf85d83901..208b1df6bcb0 100644 --- a/arch/arm/boot/dts/rk3xxx.dtsi +++ b/arch/arm/boot/dts/rk3xxx.dtsi @@ -91,6 +91,23 @@ status = "disabled"; }; + emac: ethernet@10204000 { + compatible = "snps,arc-emac"; + reg = <0x10204000 0x3c>; + interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + + rockchip,grf = <&grf>; + + clocks = <&cru HCLK_EMAC>, <&cru SCLK_MAC>; + clock-names = "hclk", "macref"; + max-speed = <100>; + phy-mode = "rmii"; + + status = "disabled"; + }; + mmc0: dwmmc@10214000 { compatible = "rockchip,rk2928-dw-mshc"; reg = <0x10214000 0x1000>; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index a37b989a2f91..6b45f649eff0 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -12,7 +12,6 @@ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/filter.h> -#include <linux/moduleloader.h> #include <linux/netdevice.h> #include <linux/string.h> #include <linux/slab.h> @@ -174,6 +173,15 @@ static inline bool is_load_to_a(u16 inst) } } +static void jit_fill_hole(void *area, unsigned int size) +{ + /* Insert illegal UND instructions. */ + u32 *ptr, fill_ins = 0xe7ffffff; + /* We are guaranteed to have aligned memory. 
*/ + for (ptr = area; size >= sizeof(u32); size -= sizeof(u32)) + *ptr++ = fill_ins; +} + static void build_prologue(struct jit_ctx *ctx) { u16 reg_set = saved_regs(ctx); @@ -859,9 +867,11 @@ b_epilogue: void bpf_jit_compile(struct bpf_prog *fp) { + struct bpf_binary_header *header; struct jit_ctx ctx; unsigned tmp_idx; unsigned alloc_size; + u8 *target_ptr; if (!bpf_jit_enable) return; @@ -897,13 +907,15 @@ void bpf_jit_compile(struct bpf_prog *fp) /* there's nothing after the epilogue on ARMv7 */ build_epilogue(&ctx); #endif - alloc_size = 4 * ctx.idx; - ctx.target = module_alloc(alloc_size); - if (unlikely(ctx.target == NULL)) + header = bpf_jit_binary_alloc(alloc_size, &target_ptr, + 4, jit_fill_hole); + if (header == NULL) goto out; + ctx.target = (u32 *) target_ptr; ctx.idx = 0; + build_prologue(&ctx); build_body(&ctx); build_epilogue(&ctx); @@ -919,8 +931,9 @@ void bpf_jit_compile(struct bpf_prog *fp) /* there are 2 passes here */ bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); + set_memory_ro((unsigned long)header, header->pages); fp->bpf_func = (void *)ctx.target; - fp->jited = 1; + fp->jited = true; out: kfree(ctx.offsets); return; @@ -928,7 +941,15 @@ out: void bpf_jit_free(struct bpf_prog *fp) { - if (fp->jited) - module_free(NULL, fp->bpf_func); - kfree(fp); + unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; + struct bpf_binary_header *header = (void *)addr; + + if (!fp->jited) + goto free_filter; + + set_memory_rw(addr, header->pages); + bpf_jit_binary_free(header); + +free_filter: + bpf_prog_unlock_free(fp); } diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 3ec6e8e8d368..f5b00f41c4f6 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -499,7 +499,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq) d->netdev = &orion_ge00.dev; for (i = 0; i < d->nr_chips; i++) - d->chip[i].mii_bus = &orion_ge00_shared.dev; + d->chip[i].host_dev = &orion_ge00_shared.dev; orion_switch_device.dev.platform_data = d; platform_device_register(&orion_switch_device); diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 9f7ecbda250c..64ecf9a4e526 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -765,27 +765,6 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) return (u64)err << 32 | ntohl(ret); } -#ifdef __BIG_ENDIAN_BITFIELD -#define PKT_TYPE_MAX (7 << 5) -#else -#define PKT_TYPE_MAX 7 -#endif -static int pkt_type_offset(void) -{ - struct sk_buff skb_probe = { - .pkt_type = ~0, - }; - u8 *ct = (u8 *)&skb_probe; - unsigned int off; - - for (off = 0; off < sizeof(struct sk_buff); off++) { - if (ct[off] == PKT_TYPE_MAX) - return off; - } - pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n"); - return -1; -} - static int build_body(struct jit_ctx *ctx) { void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; @@ -1333,11 +1312,7 @@ jmp_cmp: case BPF_ANC | SKF_AD_PKTTYPE: ctx->flags |= SEEN_SKB; - tmp = off = pkt_type_offset(); - - if (tmp < 0) - return -1; - emit_load_byte(r_tmp, r_skb, off, ctx); + emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx); /* Keep only the last 3 bits */ emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); #ifdef __BIG_ENDIAN_BITFIELD @@ -1418,7 +1393,7 @@ void bpf_jit_compile(struct bpf_prog *fp) bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); fp->bpf_func = (void *)ctx.target; - fp->jited = 1; + fp->jited = true; out: kfree(ctx.offsets); @@ -1428,5 +1403,6 @@ void bpf_jit_free(struct bpf_prog *fp) 
{ if (fp->jited) module_free(NULL, fp->bpf_func); - kfree(fp); + + bpf_prog_unlock_free(fp); } diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 3afa6f4c1957..cbae2dfd053c 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -686,7 +686,7 @@ void bpf_jit_compile(struct bpf_prog *fp) ((u64 *)image)[0] = (u64)code_base; ((u64 *)image)[1] = local_paca->kernel_toc; fp->bpf_func = (void *)image; - fp->jited = 1; + fp->jited = true; } out: kfree(addrs); @@ -697,5 +697,6 @@ void bpf_jit_free(struct bpf_prog *fp) { if (fp->jited) module_free(NULL, fp->bpf_func); - kfree(fp); + + bpf_prog_unlock_free(fp); } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 61e45b7c04d7..c52ac77408ca 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -5,11 +5,9 @@ * * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> */ -#include <linux/moduleloader.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <linux/filter.h> -#include <linux/random.h> #include <linux/init.h> #include <asm/cacheflush.h> #include <asm/facility.h> @@ -148,6 +146,12 @@ struct bpf_jit { ret; \ }) +static void bpf_jit_fill_hole(void *area, unsigned int size) +{ + /* Fill whole space with illegal instructions */ + memset(area, 0, size); +} + static void bpf_jit_prologue(struct bpf_jit *jit) { /* Save registers and create stack frame if necessary */ @@ -223,37 +227,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit) EMIT2(0x07fe); } -/* Helper to find the offset of pkt_type in sk_buff - * Make sure its still a 3bit field starting at the MSBs within a byte. - */ -#define PKT_TYPE_MAX 0xe0 -static int pkt_type_offset; - -static int __init bpf_pkt_type_offset_init(void) -{ - struct sk_buff skb_probe = { - .pkt_type = ~0, - }; - char *ct = (char *)&skb_probe; - int off; - - pkt_type_offset = -1; - for (off = 0; off < sizeof(struct sk_buff); off++) { - if (!ct[off]) - continue; - if (ct[off] == PKT_TYPE_MAX) - pkt_type_offset = off; - else { - /* Found non matching bit pattern, fix needed. */ - WARN_ON_ONCE(1); - pkt_type_offset = -1; - return -1; - } - } - return 0; -} -device_initcall(bpf_pkt_type_offset_init); - /* * make sure we dont leak kernel information to user */ @@ -753,12 +726,10 @@ call_fn: /* lg %r1,<d(function)>(%r13) */ } break; case BPF_ANC | SKF_AD_PKTTYPE: - if (pkt_type_offset < 0) - goto out; /* lhi %r5,0 */ EMIT4(0xa7580000); /* ic %r5,<d(pkt_type_offset)>(%r2) */ - EMIT4_DISP(0x43502000, pkt_type_offset); + EMIT4_DISP(0x43502000, PKT_TYPE_OFFSET()); /* srl %r5,5 */ EMIT4_DISP(0x88500000, 5); break; @@ -780,38 +751,6 @@ out: return -1; } -/* - * Note: for security reasons, bpf code will follow a randomly - * sized amount of illegal instructions. - */ -struct bpf_binary_header { - unsigned int pages; - u8 image[]; -}; - -static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize, - u8 **image_ptr) -{ - struct bpf_binary_header *header; - unsigned int sz, hole; - - /* Most BPF filters are really small, but if some of them fill a page, - * allow at least 128 extra bytes for illegal instructions. 
- */ - sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE); - header = module_alloc(sz); - if (!header) - return NULL; - memset(header, 0, sz); - header->pages = sz / PAGE_SIZE; - hole = min(sz - (bpfsize + sizeof(*header)), PAGE_SIZE - sizeof(*header)); - /* Insert random number of illegal instructions before BPF code - * and make sure the first instruction starts at an even address. - */ - *image_ptr = &header->image[(prandom_u32() % hole) & -2]; - return header; -} - void bpf_jit_compile(struct bpf_prog *fp) { struct bpf_binary_header *header = NULL; @@ -850,7 +789,8 @@ void bpf_jit_compile(struct bpf_prog *fp) size = prg_len + lit_len; if (size >= BPF_SIZE_MAX) goto out; - header = bpf_alloc_binary(size, &jit.start); + header = bpf_jit_binary_alloc(size, &jit.start, + 2, bpf_jit_fill_hole); if (!header) goto out; jit.prg = jit.mid = jit.start + prg_len; @@ -869,7 +809,7 @@ void bpf_jit_compile(struct bpf_prog *fp) if (jit.start) { set_memory_ro((unsigned long)header, header->pages); fp->bpf_func = (void *) jit.start; - fp->jited = 1; + fp->jited = true; } out: kfree(addrs); @@ -884,8 +824,8 @@ void bpf_jit_free(struct bpf_prog *fp) goto free_filter; set_memory_rw(addr, header->pages); - module_free(NULL, header); + bpf_jit_binary_free(header); free_filter: - kfree(fp); + bpf_prog_unlock_free(fp); } diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index 51ae87b483e0..7c06f18150ab 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -585,16 +585,11 @@ void bpf_jit_compile(struct bpf_prog *fp) case BPF_ANC | SKF_AD_PROTOCOL: emit_skb_load16(protocol, r_A); break; -#if 0 - /* GCC won't let us take the address of - * a bit field even though we very much - * know what we are doing here. - */ case BPF_ANC | SKF_AD_PKTTYPE: - __emit_skb_load8(pkt_type, r_A); + __emit_skb_load8(__pkt_type_offset, r_A); + emit_andi(r_A, PKT_TYPE_MAX, r_A); emit_alu_K(SRL, 5); break; -#endif case BPF_ANC | SKF_AD_IFINDEX: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); @@ -812,7 +807,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; if (image) { bpf_flush_icache(image, image + proglen); fp->bpf_func = (void *)image; - fp->jited = 1; + fp->jited = true; } out: kfree(addrs); @@ -823,5 +818,6 @@ void bpf_jit_free(struct bpf_prog *fp) { if (fp->jited) module_free(NULL, fp->bpf_func); - kfree(fp); + + bpf_prog_unlock_free(fp); } diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 5c8cb8043c5a..d56cd1f515bd 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -8,12 +8,10 @@ * as published by the Free Software Foundation; version 2 * of the License. */ -#include <linux/moduleloader.h> -#include <asm/cacheflush.h> #include <linux/netdevice.h> #include <linux/filter.h> #include <linux/if_vlan.h> -#include <linux/random.h> +#include <asm/cacheflush.h> int bpf_jit_enable __read_mostly; @@ -109,39 +107,6 @@ static inline void bpf_flush_icache(void *start, void *end) #define CHOOSE_LOAD_FUNC(K, func) \ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative_offset : func) : func##_positive_offset) -struct bpf_binary_header { - unsigned int pages; - /* Note : for security reasons, bpf code will follow a randomly - * sized amount of int3 instructions - */ - u8 image[]; -}; - -static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, - u8 **image_ptr) -{ - unsigned int sz, hole; - struct bpf_binary_header *header; - - /* Most of BPF filters are really small, - * but if some of them fill a page, allow at least - * 128 extra bytes to insert a random section of int3 - */ - sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE); - header = module_alloc(sz); - if (!header) - return NULL; - - memset(header, 0xcc, sz); /* fill whole space with int3 instructions */ - - header->pages = sz / PAGE_SIZE; - hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header)); - - /* insert a random number of int3 instructions before BPF code */ - *image_ptr = &header->image[prandom_u32() % hole]; - return header; -} - /* pick a register outside of BPF range for JIT internal work */ #define AUX_REG (MAX_BPF_REG + 1) @@ -206,6 +171,12 @@ static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); } +static void jit_fill_hole(void *area, unsigned int size) +{ + /* fill whole space with int3 instructions */ + memset(area, 0xcc, size); +} + struct jit_context { unsigned int cleanup_addr; /* epilogue code offset */ bool seen_ld_abs; @@ -393,6 +364,23 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); break; + case BPF_LD | BPF_IMM | BPF_DW: + if (insn[1].code != 0 || insn[1].src_reg != 0 || + insn[1].dst_reg != 0 || insn[1].off != 0) { + /* verifier must catch invalid insns */ + pr_err("invalid BPF_LD_IMM64 insn\n"); + return -EINVAL; + } + + /* movabsq %rax, imm64 */ + EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); + EMIT(insn[0].imm, 4); + EMIT(insn[1].imm, 4); + + insn++; + i++; + break; + /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU | BPF_DIV | BPF_X: @@ -515,6 +503,48 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); break; + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + + /* check for bad case when dst_reg == rcx */ + if (dst_reg == BPF_REG_4) { + /* mov r11, dst_reg */ + EMIT_mov(AUX_REG, dst_reg); + dst_reg = AUX_REG; + } + + if (src_reg != BPF_REG_4) { /* common case */ + EMIT1(0x51); /* push rcx */ + + /* mov rcx, src_reg */ + EMIT_mov(BPF_REG_4, src_reg); + } + + /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); + + switch (BPF_OP(insn->code)) { + case BPF_LSH: b3 = 0xE0; break; + case BPF_RSH: b3 = 0xE8; break; + case BPF_ARSH: b3 = 0xF8; break; + } + EMIT2(0xD3, add_1reg(b3, dst_reg)); + + if (src_reg != BPF_REG_4) + EMIT1(0x59); /* pop rcx */ + + if (insn->dst_reg == BPF_REG_4) + /* mov dst_reg, r11 */ + EMIT_mov(insn->dst_reg, AUX_REG); + break; + case BPF_ALU | BPF_END | BPF_FROM_BE: switch (imm32) { case 16: @@ -900,7 +930,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog) if (proglen <= 0) { image = NULL; if (header) - module_free(NULL, header); + 
bpf_jit_binary_free(header); goto out; } if (image) { @@ -910,7 +940,8 @@ void bpf_int_jit_compile(struct bpf_prog *prog) break; } if (proglen == oldproglen) { - header = bpf_alloc_binary(proglen, &image); + header = bpf_jit_binary_alloc(proglen, &image, + 1, jit_fill_hole); if (!header) goto out; } @@ -924,29 +955,23 @@ void bpf_int_jit_compile(struct bpf_prog *prog) bpf_flush_icache(header, image + proglen); set_memory_ro((unsigned long)header, header->pages); prog->bpf_func = (void *)image; - prog->jited = 1; + prog->jited = true; } out: kfree(addrs); } -static void bpf_jit_free_deferred(struct work_struct *work) +void bpf_jit_free(struct bpf_prog *fp) { - struct bpf_prog *fp = container_of(work, struct bpf_prog, work); unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; struct bpf_binary_header *header = (void *)addr; + if (!fp->jited) + goto free_filter; + set_memory_rw(addr, header->pages); - module_free(NULL, header); - kfree(fp); -} + bpf_jit_binary_free(header); -void bpf_jit_free(struct bpf_prog *fp) -{ - if (fp->jited) { - INIT_WORK(&fp->work, bpf_jit_free_deferred); - schedule_work(&fp->work); - } else { - kfree(fp); - } +free_filter: + bpf_prog_unlock_free(fp); } diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 11115bbe115c..004d6aa671ce 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -21,6 +21,14 @@ #include <linux/serial_reg.h> #include <linux/time.h> +enum bcma_boot_dev { + BCMA_BOOT_DEV_UNK = 0, + BCMA_BOOT_DEV_ROM, + BCMA_BOOT_DEV_PARALLEL, + BCMA_BOOT_DEV_SERIAL, + BCMA_BOOT_DEV_NAND, +}; + static const char * const part_probes[] = { "bcm47xxpart", NULL }; static struct physmap_flash_data bcma_pflash_data = { @@ -229,11 +237,51 @@ u32 bcma_cpu_clock(struct bcma_drv_mips *mcore) } EXPORT_SYMBOL(bcma_cpu_clock); +static enum bcma_boot_dev bcma_boot_dev(struct bcma_bus *bus) +{ + struct bcma_drv_cc *cc = &bus->drv_cc; + u8 cc_rev = cc->core->id.rev; + + if (cc_rev == 42) { + struct bcma_device *core; + + core = bcma_find_core(bus, BCMA_CORE_NS_ROM); + if (core) { + switch (bcma_aread32(core, BCMA_IOST) & + BCMA_NS_ROM_IOST_BOOT_DEV_MASK) { + case BCMA_NS_ROM_IOST_BOOT_DEV_NOR: + return BCMA_BOOT_DEV_SERIAL; + case BCMA_NS_ROM_IOST_BOOT_DEV_NAND: + return BCMA_BOOT_DEV_NAND; + case BCMA_NS_ROM_IOST_BOOT_DEV_ROM: + default: + return BCMA_BOOT_DEV_ROM; + } + } + } else { + if (cc_rev == 38) { + if (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT) + return BCMA_BOOT_DEV_NAND; + else if (cc->status & BIT(5)) + return BCMA_BOOT_DEV_ROM; + } + + if ((cc->capabilities & BCMA_CC_CAP_FLASHT) == + BCMA_CC_FLASHT_PARA) + return BCMA_BOOT_DEV_PARALLEL; + else + return BCMA_BOOT_DEV_SERIAL; + } + + return BCMA_BOOT_DEV_SERIAL; +} + static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) { struct bcma_bus *bus = mcore->core->bus; struct bcma_drv_cc *cc = &bus->drv_cc; struct bcma_pflash *pflash = &cc->pflash; + enum bcma_boot_dev boot_dev; switch (cc->capabilities & BCMA_CC_CAP_FLASHT) { case BCMA_CC_FLASHT_STSER: @@ -269,6 +317,20 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) bcma_nflash_init(cc); } } + + /* Determine flash type this SoC boots from */ + boot_dev = bcma_boot_dev(bus); + switch (boot_dev) { + case BCMA_BOOT_DEV_PARALLEL: + case BCMA_BOOT_DEV_SERIAL: + /* TODO: Init NVRAM using BCMA_SOC_FLASH2 window */ + break; + case BCMA_BOOT_DEV_NAND: + /* TODO: Init NVRAM using BCMA_SOC_FLASH1 window */ + break; + default: + break; + } } void bcma_core_mips_early_init(struct bcma_drv_mips 
*mcore) diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c index 3475e600011a..1edd7e064621 100644 --- a/drivers/bcma/host_soc.c +++ b/drivers/bcma/host_soc.c @@ -134,12 +134,16 @@ static void bcma_host_soc_block_write(struct bcma_device *core, static u32 bcma_host_soc_aread32(struct bcma_device *core, u16 offset) { + if (WARN_ONCE(!core->io_wrap, "Accessed core has no wrapper/agent\n")) + return ~0; return readl(core->io_wrap + offset); } static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset, u32 value) { + if (WARN_ONCE(!core->io_wrap, "Accessed core has no wrapper/agent\n")) + return; writel(value, core->io_wrap + offset); } diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index b4764c6bcf17..e9bd77249a4c 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -421,10 +421,13 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE); if (!core->io_addr) return -ENOMEM; - core->io_wrap = ioremap_nocache(core->wrap, BCMA_CORE_SIZE); - if (!core->io_wrap) { - iounmap(core->io_addr); - return -ENOMEM; + if (core->wrap) { + core->io_wrap = ioremap_nocache(core->wrap, + BCMA_CORE_SIZE); + if (!core->io_wrap) { + iounmap(core->io_addr); + return -ENOMEM; + } } } return 0; diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a0d7355ef127..d85ced27ebd5 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x04CA, 0x300b) }, { USB_DEVICE(0x0930, 0x0219) }, { USB_DEVICE(0x0930, 0x0220) }, + { USB_DEVICE(0x0930, 0x0227) }, { USB_DEVICE(0x0b05, 0x17d0) }, { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, @@ -138,6 +139,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index dfa5043e68ba..35e63aaa6f80 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -61,7 +61,7 @@ MODULE_LICENSE("GPL"); /* ======================== Local structures ======================== */ -typedef struct bluecard_info_t { +struct bluecard_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; @@ -78,7 +78,7 @@ typedef struct bluecard_info_t { unsigned char ctrl_reg; unsigned long hw_state; /* Status of the hardware and LED control */ -} bluecard_info_t; +}; static int bluecard_config(struct pcmcia_device *link); @@ -157,7 +157,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev); static void bluecard_activity_led_timeout(u_long arg) { - bluecard_info_t *info = (bluecard_info_t *)arg; + struct bluecard_info *info = (struct bluecard_info *)arg; unsigned int iobase = info->p_dev->resource[0]->start; if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) @@ -173,7 +173,7 @@ static void bluecard_activity_led_timeout(u_long arg) } -static void bluecard_enable_activity_led(bluecard_info_t *info) +static void bluecard_enable_activity_led(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; @@ 
-215,7 +215,7 @@ static int bluecard_write(unsigned int iobase, unsigned int offset, __u8 *buf, i } -static void bluecard_write_wakeup(bluecard_info_t *info) +static void bluecard_write_wakeup(struct bluecard_info *info) { if (!info) { BT_ERR("Unknown device"); @@ -368,7 +368,8 @@ static int bluecard_read(unsigned int iobase, unsigned int offset, __u8 *buf, in } -static void bluecard_receive(bluecard_info_t *info, unsigned int offset) +static void bluecard_receive(struct bluecard_info *info, + unsigned int offset) { unsigned int iobase; unsigned char buf[31]; @@ -497,7 +498,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset) static irqreturn_t bluecard_interrupt(int irq, void *dev_inst) { - bluecard_info_t *info = dev_inst; + struct bluecard_info *info = dev_inst; unsigned int iobase; unsigned char reg; @@ -562,7 +563,7 @@ static irqreturn_t bluecard_interrupt(int irq, void *dev_inst) static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); struct sk_buff *skb; /* Ericsson baud rate command */ @@ -611,7 +612,7 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) static int bluecard_hci_flush(struct hci_dev *hdev) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); @@ -622,7 +623,7 @@ static int bluecard_hci_flush(struct hci_dev *hdev) static int bluecard_hci_open(struct hci_dev *hdev) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); @@ -643,7 +644,7 @@ static int bluecard_hci_open(struct hci_dev *hdev) static int bluecard_hci_close(struct hci_dev *hdev) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) return 0; @@ -663,7 +664,7 @@ static int bluecard_hci_close(struct hci_dev *hdev) static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - bluecard_info_t *info = hci_get_drvdata(hdev); + struct bluecard_info *info = hci_get_drvdata(hdev); switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: @@ -691,7 +692,7 @@ static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* ======================== Card services HCI interaction ======================== */ -static int bluecard_open(bluecard_info_t *info) +static int bluecard_open(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev; @@ -806,7 +807,7 @@ static int bluecard_open(bluecard_info_t *info) } -static int bluecard_close(bluecard_info_t *info) +static int bluecard_close(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev = info->hdev; @@ -833,7 +834,7 @@ static int bluecard_close(bluecard_info_t *info) static int bluecard_probe(struct pcmcia_device *link) { - bluecard_info_t *info; + struct bluecard_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); @@ -857,7 +858,7 @@ static void bluecard_detach(struct pcmcia_device *link) static int bluecard_config(struct pcmcia_device *link) { - bluecard_info_t *info = link->priv; + struct bluecard_info *info = link->priv; int i, n; link->config_index = 0x20; @@ -897,7 
+898,7 @@ failed: static void bluecard_release(struct pcmcia_device *link) { - bluecard_info_t *info = link->priv; + struct bluecard_info *info = link->priv; bluecard_close(info); diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 1d82721cf9c6..4f7e8d400bc0 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -67,7 +67,7 @@ MODULE_FIRMWARE("BT3CPCC.bin"); /* ======================== Local structures ======================== */ -typedef struct bt3c_info_t { +struct bt3c_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; @@ -80,7 +80,7 @@ typedef struct bt3c_info_t { unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; -} bt3c_info_t; +}; static int bt3c_config(struct pcmcia_device *link); @@ -175,7 +175,7 @@ static int bt3c_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) } -static void bt3c_write_wakeup(bt3c_info_t *info) +static void bt3c_write_wakeup(struct bt3c_info *info) { if (!info) { BT_ERR("Unknown device"); @@ -214,7 +214,7 @@ static void bt3c_write_wakeup(bt3c_info_t *info) } -static void bt3c_receive(bt3c_info_t *info) +static void bt3c_receive(struct bt3c_info *info) { unsigned int iobase; int size = 0, avail; @@ -336,7 +336,7 @@ static void bt3c_receive(bt3c_info_t *info) static irqreturn_t bt3c_interrupt(int irq, void *dev_inst) { - bt3c_info_t *info = dev_inst; + struct bt3c_info *info = dev_inst; unsigned int iobase; int iir; irqreturn_t r = IRQ_NONE; @@ -388,7 +388,7 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst) static int bt3c_hci_flush(struct hci_dev *hdev) { - bt3c_info_t *info = hci_get_drvdata(hdev); + struct bt3c_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); @@ -418,7 +418,7 @@ static int bt3c_hci_close(struct hci_dev *hdev) static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - bt3c_info_t *info = hci_get_drvdata(hdev); + struct bt3c_info *info = hci_get_drvdata(hdev); unsigned long flags; switch (bt_cb(skb)->pkt_type) { @@ -451,7 +451,8 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* ======================== Card services HCI interaction ======================== */ -static int bt3c_load_firmware(bt3c_info_t *info, const unsigned char *firmware, +static int bt3c_load_firmware(struct bt3c_info *info, + const unsigned char *firmware, int count) { char *ptr = (char *) firmware; @@ -536,7 +537,7 @@ error: } -static int bt3c_open(bt3c_info_t *info) +static int bt3c_open(struct bt3c_info *info) { const struct firmware *firmware; struct hci_dev *hdev; @@ -603,7 +604,7 @@ error: } -static int bt3c_close(bt3c_info_t *info) +static int bt3c_close(struct bt3c_info *info) { struct hci_dev *hdev = info->hdev; @@ -620,7 +621,7 @@ static int bt3c_close(bt3c_info_t *info) static int bt3c_probe(struct pcmcia_device *link) { - bt3c_info_t *info; + struct bt3c_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); @@ -683,7 +684,7 @@ static int bt3c_check_config_notpicky(struct pcmcia_device *p_dev, static int bt3c_config(struct pcmcia_device *link) { - bt3c_info_t *info = link->priv; + struct bt3c_info *info = link->priv; int i; unsigned long try; @@ -724,7 +725,7 @@ failed: static void bt3c_release(struct pcmcia_device *link) { - bt3c_info_t *info = link->priv; + struct bt3c_info *info = link->priv; bt3c_close(info); diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index fb948f02eda5..abb4d2106db4 100644 --- 
a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c @@ -62,7 +62,7 @@ MODULE_LICENSE("GPL"); /* ======================== Local structures ======================== */ -typedef struct btuart_info_t { +struct btuart_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; @@ -75,7 +75,7 @@ typedef struct btuart_info_t { unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; -} btuart_info_t; +}; static int btuart_config(struct pcmcia_device *link); @@ -127,7 +127,7 @@ static int btuart_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) } -static void btuart_write_wakeup(btuart_info_t *info) +static void btuart_write_wakeup(struct btuart_info *info) { if (!info) { BT_ERR("Unknown device"); @@ -172,7 +172,7 @@ static void btuart_write_wakeup(btuart_info_t *info) } -static void btuart_receive(btuart_info_t *info) +static void btuart_receive(struct btuart_info *info) { unsigned int iobase; int boguscount = 0; @@ -286,7 +286,7 @@ static void btuart_receive(btuart_info_t *info) static irqreturn_t btuart_interrupt(int irq, void *dev_inst) { - btuart_info_t *info = dev_inst; + struct btuart_info *info = dev_inst; unsigned int iobase; int boguscount = 0; int iir, lsr; @@ -340,7 +340,8 @@ static irqreturn_t btuart_interrupt(int irq, void *dev_inst) } -static void btuart_change_speed(btuart_info_t *info, unsigned int speed) +static void btuart_change_speed(struct btuart_info *info, + unsigned int speed) { unsigned long flags; unsigned int iobase; @@ -397,7 +398,7 @@ static void btuart_change_speed(btuart_info_t *info, unsigned int speed) static int btuart_hci_flush(struct hci_dev *hdev) { - btuart_info_t *info = hci_get_drvdata(hdev); + struct btuart_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); @@ -427,7 +428,7 @@ static int btuart_hci_close(struct hci_dev *hdev) static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - btuart_info_t *info = hci_get_drvdata(hdev); + struct btuart_info *info = hci_get_drvdata(hdev); switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: @@ -455,7 +456,7 @@ static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* ======================== Card services HCI interaction ======================== */ -static int btuart_open(btuart_info_t *info) +static int btuart_open(struct btuart_info *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; @@ -521,7 +522,7 @@ static int btuart_open(btuart_info_t *info) } -static int btuart_close(btuart_info_t *info) +static int btuart_close(struct btuart_info *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; @@ -550,7 +551,7 @@ static int btuart_close(btuart_info_t *info) static int btuart_probe(struct pcmcia_device *link) { - btuart_info_t *info; + struct btuart_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); @@ -613,7 +614,7 @@ static int btuart_check_config_notpicky(struct pcmcia_device *p_dev, static int btuart_config(struct pcmcia_device *link) { - btuart_info_t *info = link->priv; + struct btuart_info *info = link->priv; int i; int try; @@ -654,7 +655,7 @@ failed: static void btuart_release(struct pcmcia_device *link) { - btuart_info_t *info = link->priv; + struct btuart_info *info = link->priv; btuart_close(info); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 292c38e8aa17..0527b29c3954 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -165,6 
+165,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c index 2bd8fad17206..78e10f0c65b2 100644 --- a/drivers/bluetooth/dtl1_cs.c +++ b/drivers/bluetooth/dtl1_cs.c @@ -62,7 +62,7 @@ MODULE_LICENSE("GPL"); /* ======================== Local structures ======================== */ -typedef struct dtl1_info_t { +struct dtl1_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; @@ -78,7 +78,7 @@ typedef struct dtl1_info_t { unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; -} dtl1_info_t; +}; static int dtl1_config(struct pcmcia_device *link); @@ -94,11 +94,11 @@ static int dtl1_config(struct pcmcia_device *link); #define RECV_WAIT_DATA 1 -typedef struct { +struct nsh { u8 type; u8 zero; u16 len; -} __packed nsh_t; /* Nokia Specific Header */ +} __packed; /* Nokia Specific Header */ #define NSHL 4 /* Nokia Specific Header Length */ @@ -126,7 +126,7 @@ static int dtl1_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) } -static void dtl1_write_wakeup(dtl1_info_t *info) +static void dtl1_write_wakeup(struct dtl1_info *info) { if (!info) { BT_ERR("Unknown device"); @@ -176,7 +176,7 @@ static void dtl1_write_wakeup(dtl1_info_t *info) } -static void dtl1_control(dtl1_info_t *info, struct sk_buff *skb) +static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb) { u8 flowmask = *(u8 *)skb->data; int i; @@ -199,10 +199,10 @@ static void dtl1_control(dtl1_info_t *info, struct sk_buff *skb) } -static void dtl1_receive(dtl1_info_t *info) +static void dtl1_receive(struct dtl1_info *info) { unsigned int iobase; - nsh_t *nsh; + struct nsh *nsh; int boguscount = 0; if (!info) { @@ -227,7 +227,7 @@ static void dtl1_receive(dtl1_info_t *info) } *skb_put(info->rx_skb, 1) = inb(iobase + UART_RX); - nsh = (nsh_t *)info->rx_skb->data; + nsh = (struct nsh *)info->rx_skb->data; info->rx_count--; @@ -287,7 +287,7 @@ static void dtl1_receive(dtl1_info_t *info) static irqreturn_t dtl1_interrupt(int irq, void *dev_inst) { - dtl1_info_t *info = dev_inst; + struct dtl1_info *info = dev_inst; unsigned int iobase; unsigned char msr; int boguscount = 0; @@ -365,7 +365,7 @@ static int dtl1_hci_open(struct hci_dev *hdev) static int dtl1_hci_flush(struct hci_dev *hdev) { - dtl1_info_t *info = hci_get_drvdata(hdev); + struct dtl1_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); @@ -387,9 +387,9 @@ static int dtl1_hci_close(struct hci_dev *hdev) static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { - dtl1_info_t *info = hci_get_drvdata(hdev); + struct dtl1_info *info = hci_get_drvdata(hdev); struct sk_buff *s; - nsh_t nsh; + struct nsh nsh; switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: @@ -436,7 +436,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) /* ======================== Card services HCI interaction ======================== */ -static int dtl1_open(dtl1_info_t *info) +static int dtl1_open(struct dtl1_info *info) { unsigned long flags; unsigned int iobase = 
info->p_dev->resource[0]->start; @@ -505,7 +505,7 @@ static int dtl1_open(dtl1_info_t *info) } -static int dtl1_close(dtl1_info_t *info) +static int dtl1_close(struct dtl1_info *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; @@ -534,7 +534,7 @@ static int dtl1_close(dtl1_info_t *info) static int dtl1_probe(struct pcmcia_device *link) { - dtl1_info_t *info; + struct dtl1_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); @@ -552,7 +552,7 @@ static int dtl1_probe(struct pcmcia_device *link) static void dtl1_detach(struct pcmcia_device *link) { - dtl1_info_t *info = link->priv; + struct dtl1_info *info = link->priv; dtl1_close(info); pcmcia_disable_device(link); @@ -571,7 +571,7 @@ static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data) static int dtl1_config(struct pcmcia_device *link) { - dtl1_info_t *info = link->priv; + struct dtl1_info *info = link->priv; int ret; /* Look for a generic full-sized window */ diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index caacb422995d..a22838669b4e 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -237,7 +237,7 @@ static void h5_pkt_cull(struct h5 *h5) break; to_remove--; - seq = (seq - 1) % 8; + seq = (seq - 1) & 0x07; } if (seq != h5->rx_ack) diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index b7ae0a0dd5b6..aecec6d32463 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -2365,7 +2365,7 @@ static int gigaset_probe(struct usb_interface *interface, endpoint = &hostif->endpoint[0].desc; usb_fill_int_urb(ucs->urb_int_in, udev, usb_rcvintpipe(udev, - (endpoint->bEndpointAddress) & 0x0f), + usb_endpoint_num(endpoint)), ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs, endpoint->bInterval); rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL); diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index d0a41cb0cf62..00d40773b07f 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c @@ -751,7 +751,7 @@ static int gigaset_probe(struct usb_interface *interface, /* Fill the interrupt urb and send it to the core */ usb_fill_int_urb(ucs->read_urb, udev, usb_rcvintpipe(udev, - endpoint->bEndpointAddress & 0x0f), + usb_endpoint_num(endpoint)), ucs->rcvbuf, buffer_size, gigaset_read_int_callback, cs, endpoint->bInterval); diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index a4f05c54c32b..87f7dff20ff6 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -1454,66 +1454,63 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members) #ifdef CMX_CONF_DEBUG if (0) { #else - if (members == 2) { + if (members == 2) { #endif - /* "other" becomes other party */ - other = (list_entry(conf->mlist.next, - struct dsp_conf_member, list))->dsp; - if (other == member) - other = (list_entry(conf->mlist.prev, - struct dsp_conf_member, list))->dsp; - o_q = other->rx_buff; /* received data */ - o_rr = (other->rx_R + len) & CMX_BUFF_MASK; - /* end of rx-pointer */ - o_r = (o_rr - rr + r) & CMX_BUFF_MASK; - /* start rx-pointer at current read position*/ - /* -> if echo is NOT enabled */ - if (!dsp->echo.software) { - /* - * -> copy other member's rx-data, - * if tx-data is available, mix - */ - while (o_r != o_rr && t != tt) { - *d++ = dsp_audio_mix_law[(p[t] << 8) | o_q[o_r]]; - t = (t + 1) & CMX_BUFF_MASK; - o_r = (o_r + 1) & 
CMX_BUFF_MASK; - } - while (o_r != o_rr) { - *d++ = o_q[o_r]; - o_r = (o_r + 1) & CMX_BUFF_MASK; - } - /* -> if echo is enabled */ - } else { - /* - * -> mix other member's rx-data with echo, - * if tx-data is available, mix - */ - while (r != rr && t != tt) { - sample = dsp_audio_law_to_s32[p[t]] + - dsp_audio_law_to_s32[q[r]] + - dsp_audio_law_to_s32[o_q[o_r]]; - if (sample < -32768) - sample = -32768; - else if (sample > 32767) - sample = 32767; - *d++ = dsp_audio_s16_to_law[sample & 0xffff]; - /* tx-data + rx_data + echo */ - t = (t + 1) & CMX_BUFF_MASK; - r = (r + 1) & CMX_BUFF_MASK; - o_r = (o_r + 1) & CMX_BUFF_MASK; - } - while (r != rr) { - *d++ = dsp_audio_mix_law[(q[r] << 8) | o_q[o_r]]; - r = (r + 1) & CMX_BUFF_MASK; - o_r = (o_r + 1) & CMX_BUFF_MASK; - } + /* "other" becomes other party */ + other = (list_entry(conf->mlist.next, + struct dsp_conf_member, list))->dsp; + if (other == member) + other = (list_entry(conf->mlist.prev, + struct dsp_conf_member, list))->dsp; + o_q = other->rx_buff; /* received data */ + o_rr = (other->rx_R + len) & CMX_BUFF_MASK; + /* end of rx-pointer */ + o_r = (o_rr - rr + r) & CMX_BUFF_MASK; + /* start rx-pointer at current read position*/ + /* -> if echo is NOT enabled */ + if (!dsp->echo.software) { + /* + * -> copy other member's rx-data, + * if tx-data is available, mix + */ + while (o_r != o_rr && t != tt) { + *d++ = dsp_audio_mix_law[(p[t] << 8) | o_q[o_r]]; + t = (t + 1) & CMX_BUFF_MASK; + o_r = (o_r + 1) & CMX_BUFF_MASK; + } + while (o_r != o_rr) { + *d++ = o_q[o_r]; + o_r = (o_r + 1) & CMX_BUFF_MASK; + } + /* -> if echo is enabled */ + } else { + /* + * -> mix other member's rx-data with echo, + * if tx-data is available, mix + */ + while (r != rr && t != tt) { + sample = dsp_audio_law_to_s32[p[t]] + + dsp_audio_law_to_s32[q[r]] + + dsp_audio_law_to_s32[o_q[o_r]]; + if (sample < -32768) + sample = -32768; + else if (sample > 32767) + sample = 32767; + *d++ = dsp_audio_s16_to_law[sample & 0xffff]; + /* tx-data + rx_data + echo */ + t = (t + 1) & CMX_BUFF_MASK; + r = (r + 1) & CMX_BUFF_MASK; + o_r = (o_r + 1) & CMX_BUFF_MASK; + } + while (r != rr) { + *d++ = dsp_audio_mix_law[(q[r] << 8) | o_q[o_r]]; + r = (r + 1) & CMX_BUFF_MASK; + o_r = (o_r + 1) & CMX_BUFF_MASK; } - dsp->tx_R = t; - goto send_packet; } -#ifdef DSP_NEVER_DEFINED + dsp->tx_R = t; + goto send_packet; } -#endif /* PROCESS DATA (three or more members) */ /* -> if echo is NOT enabled */ if (!dsp->echo.software) { diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index ee2c73a9de39..7e9e522fd476 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -234,24 +234,6 @@ static inline int __check_agg_selection_timer(struct port *port) } /** - * __get_state_machine_lock - lock the port's state machines - * @port: the port we're looking at - */ -static inline void __get_state_machine_lock(struct port *port) -{ - spin_lock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock)); -} - -/** - * __release_state_machine_lock - unlock the port's state machines - * @port: the port we're looking at - */ -static inline void __release_state_machine_lock(struct port *port) -{ - spin_unlock_bh(&(SLAVE_AD_INFO(port->slave)->state_machine_lock)); -} - -/** * __get_link_speed - get a port's speed * @port: the port we're looking at * @@ -315,15 +297,14 @@ static u16 __get_link_speed(struct port *port) static u8 __get_duplex(struct port *port) { struct slave *slave = port->slave; - u8 retval; /* handling a special case: when the configuration 
starts with * link down, it sets the duplex to 0. */ - if (slave->link != BOND_LINK_UP) + if (slave->link != BOND_LINK_UP) { retval = 0x0; - else { + } else { switch (slave->duplex) { case DUPLEX_FULL: retval = 0x1; @@ -341,16 +322,6 @@ static u8 __get_duplex(struct port *port) return retval; } -/** - * __initialize_port_locks - initialize a port's STATE machine spinlock - * @port: the slave of the port we're looking at - */ -static inline void __initialize_port_locks(struct slave *slave) -{ - /* make sure it isn't called twice */ - spin_lock_init(&(SLAVE_AD_INFO(slave)->state_machine_lock)); -} - /* Conversions */ /** @@ -1843,7 +1814,6 @@ void bond_3ad_bind_slave(struct slave *slave) ad_initialize_port(port, bond->params.lacp_fast); - __initialize_port_locks(slave); port->slave = slave; port->actor_port_number = SLAVE_AD_INFO(slave)->id; /* key is determined according to the link speed, duplex and user key(which @@ -1899,6 +1869,8 @@ void bond_3ad_unbind_slave(struct slave *slave) struct slave *slave_iter; struct list_head *iter; + /* Sync against bond_3ad_state_machine_handler() */ + spin_lock_bh(&bond->mode_lock); aggregator = &(SLAVE_AD_INFO(slave)->aggregator); port = &(SLAVE_AD_INFO(slave)->port); @@ -1906,7 +1878,7 @@ void bond_3ad_unbind_slave(struct slave *slave) if (!port->slave) { netdev_warn(bond->dev, "Trying to unbind an uninitialized port on %s\n", slave->dev->name); - return; + goto out; } netdev_dbg(bond->dev, "Unbinding Link Aggregation Group %d\n", @@ -2032,6 +2004,9 @@ void bond_3ad_unbind_slave(struct slave *slave) } } port->slave = NULL; + +out: + spin_unlock_bh(&bond->mode_lock); } /** @@ -2057,7 +2032,11 @@ void bond_3ad_state_machine_handler(struct work_struct *work) struct port *port; bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; - read_lock(&bond->lock); + /* Lock to protect data accessed by all (e.g., port->sm_vars) and + * against running with bond_3ad_unbind_slave. ad_rx_machine may run + * concurrently due to incoming LACPDU as well. + */ + spin_lock_bh(&bond->mode_lock); rcu_read_lock(); /* check if there are any slaves */ @@ -2093,12 +2072,6 @@ void bond_3ad_state_machine_handler(struct work_struct *work) goto re_arm; } - /* Lock around state machines to protect data accessed - * by all (e.g., port->sm_vars). ad_rx_machine may run - * concurrently due to incoming LACPDU. 
- */ - __get_state_machine_lock(port); - ad_rx_machine(NULL, port); ad_periodic_machine(port); ad_port_selection_logic(port); @@ -2108,8 +2081,6 @@ void bond_3ad_state_machine_handler(struct work_struct *work) /* turn off the BEGIN bit, since we already handled it */ if (port->sm_vars & AD_PORT_BEGIN) port->sm_vars &= ~AD_PORT_BEGIN; - - __release_state_machine_lock(port); } re_arm: @@ -2120,7 +2091,7 @@ re_arm: } } rcu_read_unlock(); - read_unlock(&bond->lock); + spin_unlock_bh(&bond->mode_lock); if (should_notify_rtnl && rtnl_trylock()) { bond_slave_state_notify(bond); @@ -2161,9 +2132,9 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n", port->actor_port_number); /* Protect against concurrent state machines */ - __get_state_machine_lock(port); + spin_lock(&slave->bond->mode_lock); ad_rx_machine(lacpdu, port); - __release_state_machine_lock(port); + spin_unlock(&slave->bond->mode_lock); break; case AD_TYPE_MARKER: @@ -2213,7 +2184,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) return; } - __get_state_machine_lock(port); + spin_lock_bh(&slave->bond->mode_lock); port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= @@ -2224,7 +2195,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave) */ port->sm_vars |= AD_PORT_BEGIN; - __release_state_machine_lock(port); + spin_unlock_bh(&slave->bond->mode_lock); } /** @@ -2246,7 +2217,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) return; } - __get_state_machine_lock(port); + spin_lock_bh(&slave->bond->mode_lock); port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; port->actor_oper_port_key = port->actor_admin_port_key |= @@ -2257,7 +2228,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave) */ port->sm_vars |= AD_PORT_BEGIN; - __release_state_machine_lock(port); + spin_unlock_bh(&slave->bond->mode_lock); } /** @@ -2280,7 +2251,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) return; } - __get_state_machine_lock(port); + spin_lock_bh(&slave->bond->mode_lock); /* on link down we are zeroing duplex and speed since * some of the adaptors(ce1000.lan) report full duplex/speed * instead of N/A(duplex) / 0(speed). @@ -2311,7 +2282,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) */ port->sm_vars |= AD_PORT_BEGIN; - __release_state_machine_lock(port); + spin_unlock_bh(&slave->bond->mode_lock); } /** @@ -2395,7 +2366,6 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond, return 0; } -/* Wrapper used to hold bond->lock so no slave manipulation can occur */ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) { int ret; @@ -2477,20 +2447,16 @@ err_free: int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave) { - int ret = RX_HANDLER_ANOTHER; struct lacpdu *lacpdu, _lacpdu; if (skb->protocol != PKT_TYPE_LACPDU) - return ret; + return RX_HANDLER_ANOTHER; lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu); if (!lacpdu) - return ret; + return RX_HANDLER_ANOTHER; - read_lock(&bond->lock); - ret = bond_3ad_rx_indication(lacpdu, slave, skb->len); - read_unlock(&bond->lock); - return ret; + return bond_3ad_rx_indication(lacpdu, slave, skb->len); } /** @@ -2500,7 +2466,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, * When modify lacp_rate parameter via sysfs, * update actor_oper_port_state of each port. 
* - * Hold slave->state_machine_lock, + * Hold bond->mode_lock, * so we can modify port->actor_oper_port_state, * no matter bond is up or down. */ @@ -2512,13 +2478,13 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) int lacp_fast; lacp_fast = bond->params.lacp_fast; + spin_lock_bh(&bond->mode_lock); bond_for_each_slave(bond, slave, iter) { port = &(SLAVE_AD_INFO(slave)->port); - __get_state_machine_lock(port); if (lacp_fast) port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; else port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT; - __release_state_machine_lock(port); } + spin_unlock_bh(&bond->mode_lock); } diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index bb03b1df2f3e..c5f14ac63f3e 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -259,7 +259,6 @@ struct ad_bond_info { struct ad_slave_info { struct aggregator aggregator; /* 802.3ad aggregator structure */ struct port port; /* 802.3ad port structure */ - spinlock_t state_machine_lock; /* mutex state machines vs. incoming LACPDU */ u16 id; }; diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 95dd1f58c260..615f3bebd019 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -100,27 +100,6 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size) /*********************** tlb specific functions ***************************/ -static inline void _lock_tx_hashtbl_bh(struct bonding *bond) -{ - spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); -} - -static inline void _unlock_tx_hashtbl_bh(struct bonding *bond) -{ - spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); -} - -static inline void _lock_tx_hashtbl(struct bonding *bond) -{ - spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); -} - -static inline void _unlock_tx_hashtbl(struct bonding *bond) -{ - spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock)); -} - -/* Caller must hold tx_hashtbl lock */ static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load) { if (save_load) { @@ -140,7 +119,6 @@ static inline void tlb_init_slave(struct slave *slave) SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX; } -/* Caller must hold bond lock for read, BH disabled */ static void __tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load) { @@ -163,13 +141,12 @@ static void __tlb_clear_slave(struct bonding *bond, struct slave *slave, tlb_init_slave(slave); } -/* Caller must hold bond lock for read */ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load) { - _lock_tx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); __tlb_clear_slave(bond, slave, save_load); - _unlock_tx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } /* Must be called before starting the monitor timer */ @@ -184,14 +161,14 @@ static int tlb_initialize(struct bonding *bond) if (!new_hashtbl) return -1; - _lock_tx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); bond_info->tx_hashtbl = new_hashtbl; for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0); - _unlock_tx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); return 0; } @@ -202,12 +179,12 @@ static void tlb_deinitialize(struct bonding *bond) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct tlb_up_slave *arr; - _lock_tx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); kfree(bond_info->tx_hashtbl); bond_info->tx_hashtbl = NULL; - _unlock_tx_hashtbl_bh(bond); + 
spin_unlock_bh(&bond->mode_lock); arr = rtnl_dereference(bond_info->slave_arr); if (arr) @@ -220,7 +197,6 @@ static long long compute_gap(struct slave *slave) (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */ } -/* Caller must hold bond lock for read */ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond) { struct slave *slave, *least_loaded; @@ -281,42 +257,23 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index, return assigned_slave; } -/* Caller must hold bond lock for read */ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len) { struct slave *tx_slave; - /* - * We don't need to disable softirq here, becase + + /* We don't need to disable softirq here, because * tlb_choose_channel() is only called by bond_alb_xmit() * which already has softirq disabled. */ - _lock_tx_hashtbl(bond); + spin_lock(&bond->mode_lock); tx_slave = __tlb_choose_channel(bond, hash_index, skb_len); - _unlock_tx_hashtbl(bond); + spin_unlock(&bond->mode_lock); + return tx_slave; } /*********************** rlb specific functions ***************************/ -static inline void _lock_rx_hashtbl_bh(struct bonding *bond) -{ - spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); -} - -static inline void _unlock_rx_hashtbl_bh(struct bonding *bond) -{ - spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); -} - -static inline void _lock_rx_hashtbl(struct bonding *bond) -{ - spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); -} - -static inline void _unlock_rx_hashtbl(struct bonding *bond) -{ - spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); -} /* when an ARP REPLY is received from a client update its info * in the rx_hashtbl */ @@ -327,7 +284,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp) struct rlb_client_info *client_info; u32 hash_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src)); client_info = &(bond_info->rx_hashtbl[hash_index]); @@ -342,7 +299,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp) bond_info->rx_ntt = 1; } - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond, @@ -378,15 +335,15 @@ out: return RX_HANDLER_ANOTHER; } -/* Caller must hold bond lock for read */ -static struct slave *rlb_next_rx_slave(struct bonding *bond) +/* Caller must hold rcu_read_lock() */ +static struct slave *__rlb_next_rx_slave(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct slave *before = NULL, *rx_slave = NULL, *slave; struct list_head *iter; bool found = false; - bond_for_each_slave(bond, slave, iter) { + bond_for_each_slave_rcu(bond, slave, iter) { if (!bond_slave_can_tx(slave)) continue; if (!found) { @@ -411,35 +368,16 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond) return rx_slave; } -/* Caller must hold rcu_read_lock() for read */ -static struct slave *__rlb_next_rx_slave(struct bonding *bond) +/* Caller must hold RTNL, rcu_read_lock is obtained only to silence checkers */ +static struct slave *rlb_next_rx_slave(struct bonding *bond) { - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - struct slave *before = NULL, *rx_slave = NULL, *slave; - struct list_head *iter; - bool found = false; + struct slave *rx_slave; - bond_for_each_slave_rcu(bond, slave, iter) { - if (!bond_slave_can_tx(slave)) - continue; - if 
(!found) { - if (!before || before->speed < slave->speed) - before = slave; - } else { - if (!rx_slave || rx_slave->speed < slave->speed) - rx_slave = slave; - } - if (slave == bond_info->rx_slave) - found = true; - } - /* we didn't find anything after the current or we have something - * better before and up to the current slave - */ - if (!rx_slave || (before && rx_slave->speed < before->speed)) - rx_slave = before; + ASSERT_RTNL(); - if (rx_slave) - bond_info->rx_slave = rx_slave; + rcu_read_lock(); + rx_slave = __rlb_next_rx_slave(bond); + rcu_read_unlock(); return rx_slave; } @@ -447,11 +385,11 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond) /* teach the switch the mac of a disabled slave * on the primary for fault tolerance * - * Caller must hold bond->curr_slave_lock for write or bond lock for write + * Caller must hold RTNL */ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[]) { - struct slave *curr_active = bond_deref_active_protected(bond); + struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); if (!curr_active) return; @@ -479,7 +417,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) u32 index, next_index; /* clear slave from rx_hashtbl */ - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); rx_hash_table = bond_info->rx_hashtbl; index = bond_info->rx_hashtbl_used_head; @@ -510,14 +448,10 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) } } - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); - write_lock_bh(&bond->curr_slave_lock); - - if (slave != bond_deref_active_protected(bond)) + if (slave != rtnl_dereference(bond->curr_active_slave)) rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr); - - write_unlock_bh(&bond->curr_slave_lock); } static void rlb_update_client(struct rlb_client_info *client_info) @@ -565,7 +499,7 @@ static void rlb_update_rx_clients(struct bonding *bond) struct rlb_client_info *client_info; u32 hash_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; @@ -583,7 +517,7 @@ static void rlb_update_rx_clients(struct bonding *bond) */ bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY; - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } /* The slave was assigned a new mac address - update the clients */ @@ -594,7 +528,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla int ntt = 0; u32 hash_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; @@ -615,7 +549,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY; } - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } /* mark all clients using src_ip to be updated */ @@ -625,7 +559,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) struct rlb_client_info *client_info; u32 hash_index; - _lock_rx_hashtbl(bond); + spin_lock(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; @@ -636,7 +570,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) netdev_err(bond->dev, "found a client with no channel in the client's hash table\n"); continue; } - /*update all clients using this src_ip, that are not assigned + /* 
update all clients using this src_ip, that are not assigned * to the team's address (curr_active_slave) and have a known * unicast mac address. */ @@ -649,10 +583,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) } } - _unlock_rx_hashtbl(bond); + spin_unlock(&bond->mode_lock); } -/* Caller must hold both bond and ptr locks for read */ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); @@ -661,7 +594,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon struct rlb_client_info *client_info; u32 hash_index = 0; - _lock_rx_hashtbl(bond); + spin_lock(&bond->mode_lock); curr_active_slave = rcu_dereference(bond->curr_active_slave); @@ -680,7 +613,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon assigned_slave = client_info->slave; if (assigned_slave) { - _unlock_rx_hashtbl(bond); + spin_unlock(&bond->mode_lock); return assigned_slave; } } else { @@ -742,7 +675,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon } } - _unlock_rx_hashtbl(bond); + spin_unlock(&bond->mode_lock); return assigned_slave; } @@ -763,9 +696,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) return NULL; if (arp->op_code == htons(ARPOP_REPLY)) { - /* the arp must be sent on the selected - * rx channel - */ + /* the arp must be sent on the selected rx channel */ tx_slave = rlb_choose_channel(skb, bond); if (tx_slave) ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr); @@ -795,7 +726,6 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) return tx_slave; } -/* Caller must hold bond lock for read */ static void rlb_rebalance(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); @@ -804,7 +734,7 @@ static void rlb_rebalance(struct bonding *bond) int ntt; u32 hash_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); ntt = 0; hash_index = bond_info->rx_hashtbl_used_head; @@ -822,10 +752,10 @@ static void rlb_rebalance(struct bonding *bond) /* update the team's flag only after the whole iteration */ if (ntt) bond_info->rx_ntt = 1; - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } -/* Caller must hold rx_hashtbl lock */ +/* Caller must hold mode_lock */ static void rlb_init_table_entry_dst(struct rlb_client_info *entry) { entry->used_next = RLB_NULL_INDEX; @@ -913,15 +843,16 @@ static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash) bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash; } -/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does - * not match arp->mac_src */ +/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does + * not match arp->mac_src + */ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src)); u32 index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); index = bond_info->rx_hashtbl[ip_src_hash].src_first; while (index != RLB_NULL_INDEX) { @@ -932,7 +863,7 @@ static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp) rlb_delete_table_entry(bond, index); index = next_index; } - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } static int rlb_initialize(struct bonding *bond) @@ -946,7 +877,7 @@ static int 
rlb_initialize(struct bonding *bond) if (!new_hashtbl) return -1; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); bond_info->rx_hashtbl = new_hashtbl; @@ -955,7 +886,7 @@ static int rlb_initialize(struct bonding *bond) for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) rlb_init_table_entry(bond_info->rx_hashtbl + i); - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); /* register to receive ARPs */ bond->recv_probe = rlb_arp_recv; @@ -967,13 +898,13 @@ static void rlb_deinitialize(struct bonding *bond) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); kfree(bond_info->rx_hashtbl); bond_info->rx_hashtbl = NULL; bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) @@ -981,7 +912,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); u32 curr_index; - _lock_rx_hashtbl_bh(bond); + spin_lock_bh(&bond->mode_lock); curr_index = bond_info->rx_hashtbl_used_head; while (curr_index != RLB_NULL_INDEX) { @@ -994,7 +925,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) curr_index = next_index; } - _unlock_rx_hashtbl_bh(bond); + spin_unlock_bh(&bond->mode_lock); } /*********************** tlb/rlb shared functions *********************/ @@ -1091,8 +1022,9 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) return 0; } - /* for rlb each slave must have a unique hw mac addresses so that */ - /* each slave will receive packets destined to a different mac */ + /* for rlb each slave must have a unique hw mac address so that + * each slave will receive packets destined to a different mac + */ memcpy(s_addr.sa_data, addr, dev->addr_len); s_addr.sa_family = dev->type; if (dev_set_mac_address(dev, &s_addr)) { @@ -1103,13 +1035,10 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[]) return 0; } -/* - * Swap MAC addresses between two slaves. +/* Swap MAC addresses between two slaves. * * Called with RTNL held, and no other locks. - * */ - static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2) { u8 tmp_mac_addr[ETH_ALEN]; @@ -1120,8 +1049,7 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2) } -/* - * Send learning packets after MAC address swap. +/* Send learning packets after MAC address swap. * * Called with RTNL and no other locks */ @@ -1194,7 +1122,6 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr); if (found_slave) { - /* locking: needs RTNL and nothing else */ alb_swap_mac_addr(slave, found_slave); alb_fasten_mac_swap(bond, slave, found_slave); } @@ -1243,7 +1170,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav return 0; /* Try setting slave mac to bond address and fall-through - to code handling that situation below... */ + * to code handling that situation below... 
+ */ alb_set_slave_mac_addr(slave, bond->dev->dev_addr); } @@ -1351,7 +1279,6 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled) if (rlb_enabled) { bond->alb_info.rlb_enabled = 1; - /* initialize rlb */ res = rlb_initialize(bond); if (res) { tlb_deinitialize(bond); @@ -1375,7 +1302,7 @@ void bond_alb_deinitialize(struct bonding *bond) } static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, - struct slave *tx_slave) + struct slave *tx_slave) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct ethhdr *eth_data = eth_hdr(skb); @@ -1388,7 +1315,7 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, } if (tx_slave && bond_slave_can_tx(tx_slave)) { - if (tx_slave != rcu_dereference(bond->curr_active_slave)) { + if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) { ether_addr_copy(eth_data->h_source, tx_slave->dev->dev_addr); } @@ -1398,9 +1325,9 @@ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, } if (tx_slave && bond->params.tlb_dynamic_lb) { - _lock_tx_hashtbl(bond); + spin_lock(&bond->mode_lock); __tlb_clear_slave(bond, tx_slave, 0); - _unlock_tx_hashtbl(bond); + spin_unlock(&bond->mode_lock); } /* no suitable interface, frame not sent */ @@ -1595,13 +1522,6 @@ void bond_alb_monitor(struct work_struct *work) if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { bool strict_match; - /* change of curr_active_slave involves swapping of mac addresses. - * in order to avoid this swapping from happening while - * sending the learning packets, the curr_slave_lock must be held for - * read. - */ - read_lock(&bond->curr_slave_lock); - bond_for_each_slave_rcu(bond, slave, iter) { /* If updating current_active, use all currently * user mac addreses (!strict_match). Otherwise, only @@ -1613,17 +1533,11 @@ void bond_alb_monitor(struct work_struct *work) alb_send_learning_packets(slave, slave->dev->dev_addr, strict_match); } - - read_unlock(&bond->curr_slave_lock); - bond_info->lp_counter = 0; } /* rebalance tx traffic */ if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) { - - read_lock(&bond->curr_slave_lock); - bond_for_each_slave_rcu(bond, slave, iter) { tlb_clear_slave(bond, slave, 1); if (slave == rcu_access_pointer(bond->curr_active_slave)) { @@ -1633,19 +1547,14 @@ void bond_alb_monitor(struct work_struct *work) bond_info->unbalanced_load = 0; } } - - read_unlock(&bond->curr_slave_lock); - bond_info->tx_rebalance_counter = 0; } - /* handle rlb stuff */ if (bond_info->rlb_enabled) { if (bond_info->primary_is_promisc && (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) { - /* - * dev_set_promiscuity requires rtnl and + /* dev_set_promiscuity requires rtnl and * nothing else. Avoid race with bond_close. */ rcu_read_unlock(); @@ -1715,8 +1624,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave) return 0; } -/* - * Remove slave from tlb and rlb hash tables, and fix up MAC addresses +/* Remove slave from tlb and rlb hash tables, and fix up MAC addresses * if necessary. 
* * Caller must hold RTNL and no other locks @@ -1739,7 +1647,6 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave) } -/* Caller must hold bond lock for read */ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); @@ -1775,22 +1682,14 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char * Set the bond->curr_active_slave to @new_slave and handle * mac address swapping and promiscuity changes as needed. * - * If new_slave is NULL, caller must hold curr_slave_lock or - * bond->lock for write. - * - * If new_slave is not NULL, caller must hold RTNL, curr_slave_lock - * for write. Processing here may sleep, so no other locks may be held. + * Caller must hold RTNL */ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave) - __releases(&bond->curr_slave_lock) - __acquires(&bond->curr_slave_lock) { struct slave *swap_slave; struct slave *curr_active; - curr_active = rcu_dereference_protected(bond->curr_active_slave, - !new_slave || - lockdep_is_held(&bond->curr_slave_lock)); + curr_active = rtnl_dereference(bond->curr_active_slave); if (curr_active == new_slave) return; @@ -1812,8 +1711,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave if (!swap_slave) swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr); - /* - * Arrange for swap_slave and new_slave to temporarily be + /* Arrange for swap_slave and new_slave to temporarily be * ignored so we can mess with their MAC addresses without * fear of interference from transmit activity. */ @@ -1821,10 +1719,6 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave tlb_clear_slave(bond, swap_slave, 1); tlb_clear_slave(bond, new_slave, 1); - write_unlock_bh(&bond->curr_slave_lock); - - ASSERT_RTNL(); - /* in TLB mode, the slave might flip down/up with the old dev_addr, * and thus filter bond->dev_addr's packets, so force bond's mac */ @@ -1853,16 +1747,10 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave alb_send_learning_packets(new_slave, bond->dev->dev_addr, false); } - - write_lock_bh(&bond->curr_slave_lock); } -/* - * Called with RTNL - */ +/* Called with RTNL */ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) - __acquires(&bond->lock) - __releases(&bond->lock) { struct bonding *bond = netdev_priv(bond_dev); struct sockaddr *sa = addr; @@ -1895,14 +1783,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) } else { alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr); - read_lock(&bond->lock); alb_send_learning_packets(curr_active, bond_dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform clients mac address has changed */ rlb_req_update_slave_clients(bond, curr_active); } - read_unlock(&bond->lock); } return 0; diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h index aaeac61d03cf..3c6a7ff974d7 100644 --- a/drivers/net/bonding/bond_alb.h +++ b/drivers/net/bonding/bond_alb.h @@ -147,7 +147,6 @@ struct tlb_up_slave { struct alb_bond_info { struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */ - spinlock_t tx_hashtbl_lock; u32 unbalanced_load; int tx_rebalance_counter; int lp_counter; @@ -156,7 +155,6 @@ struct alb_bond_info { /* -------- rlb parameters -------- */ int rlb_enabled; struct rlb_client_info *rx_hashtbl; /* Receive hash table */ - spinlock_t rx_hashtbl_lock; u32 
rx_hashtbl_used_head; u8 rx_ntt; /* flag - need to transmit * to all rx clients diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c index 280971b227ea..8f99082f90eb 100644 --- a/drivers/net/bonding/bond_debugfs.c +++ b/drivers/net/bonding/bond_debugfs.c @@ -13,9 +13,7 @@ static struct dentry *bonding_debug_root; -/* - * Show RLB hash table - */ +/* Show RLB hash table */ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) { struct bonding *bond = m->private; @@ -29,7 +27,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) seq_printf(m, "SourceIP DestinationIP " "Destination MAC DEV\n"); - spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); + spin_lock_bh(&bond->mode_lock); hash_index = bond_info->rx_hashtbl_used_head; for (; hash_index != RLB_NULL_INDEX; @@ -42,7 +40,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v) client_info->slave->dev->name); } - spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); + spin_unlock_bh(&bond->mode_lock); return 0; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 798ae69fb63c..53904758d693 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -253,8 +253,7 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, dev_queue_xmit(skb); } -/* - * In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, +/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, * We don't protect the slave list iteration with a lock because: * a. This operation is performed in IOCTL context, * b. The operation is protected by the RTNL semaphore in the 8021q code, @@ -326,8 +325,7 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, /*------------------------------- Link status -------------------------------*/ -/* - * Set the carrier state for the master according to the state of its +/* Set the carrier state for the master according to the state of its * slaves. If any slaves are up, the master is up. In 802.3ad mode, * do special 802.3ad magic. * @@ -362,8 +360,7 @@ down: return 0; } -/* - * Get link speed and duplex from the slave's base driver +/* Get link speed and duplex from the slave's base driver * using ethtool. If for some reason the call fails or the * values are invalid, set speed and duplex to -1, * and return. @@ -416,8 +413,7 @@ const char *bond_slave_link_status(s8 link) } } -/* - * if <dev> supports MII link status reporting, check its link status. +/* if <dev> supports MII link status reporting, check its link status. * * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(), * depending upon the setting of the use_carrier parameter. @@ -454,14 +450,14 @@ static int bond_check_dev_link(struct bonding *bond, /* Ethtool can't be used, fallback to MII ioctls. */ ioctl = slave_ops->ndo_do_ioctl; if (ioctl) { - /* TODO: set pointer to correct ioctl on a per team member */ - /* bases to make this more efficient. that is, once */ - /* we determine the correct ioctl, we will always */ - /* call it and not the others for that team */ - /* member. */ - - /* - * We cannot assume that SIOCGMIIPHY will also read a + /* TODO: set pointer to correct ioctl on a per team member + * basis to make this more efficient. That is, once + * we determine the correct ioctl, we will always + * call it and not the others for that team + * member. 
+ */ + + /* We cannot assume that SIOCGMIIPHY will also read a * register; not all network drivers (e.g., e100) * support that. */ @@ -476,8 +472,7 @@ static int bond_check_dev_link(struct bonding *bond, } } - /* - * If reporting, report that either there's no dev->do_ioctl, + /* If reporting, report that either there's no dev->do_ioctl, * or both SIOCGMIIREG and get_link failed (meaning that we * cannot report link status). If not reporting, pretend * we're ok. @@ -487,9 +482,7 @@ static int bond_check_dev_link(struct bonding *bond, /*----------------------------- Multicast list ------------------------------*/ -/* - * Push the promiscuity flag down to appropriate slaves - */ +/* Push the promiscuity flag down to appropriate slaves */ static int bond_set_promiscuity(struct bonding *bond, int inc) { struct list_head *iter; @@ -512,9 +505,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc) return err; } -/* - * Push the allmulti flag down to all slaves - */ +/* Push the allmulti flag down to all slaves */ static int bond_set_allmulti(struct bonding *bond, int inc) { struct list_head *iter; @@ -537,8 +528,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc) return err; } -/* - * Retrieve the list of registered multicast addresses for the bonding +/* Retrieve the list of registered multicast addresses for the bonding * device and retransmit an IGMP JOIN request to the current active * slave. */ @@ -560,8 +550,7 @@ static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) rtnl_unlock(); } -/* Flush bond's hardware addresses from slave - */ +/* Flush bond's hardware addresses from slave */ static void bond_hw_addr_flush(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -588,8 +577,6 @@ static void bond_hw_addr_flush(struct net_device *bond_dev, static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, struct slave *old_active) { - ASSERT_RTNL(); - if (old_active) { if (bond->dev->flags & IFF_PROMISC) dev_set_promiscuity(old_active->dev, -1); @@ -632,18 +619,15 @@ static void bond_set_dev_addr(struct net_device *bond_dev, call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); } -/* - * bond_do_fail_over_mac +/* bond_do_fail_over_mac * * Perform special MAC address swapping for fail_over_mac settings * - * Called with RTNL, curr_slave_lock for write_bh. 
+ * Called with RTNL */ static void bond_do_fail_over_mac(struct bonding *bond, struct slave *new_active, struct slave *old_active) - __releases(&bond->curr_slave_lock) - __acquires(&bond->curr_slave_lock) { u8 tmp_mac[ETH_ALEN]; struct sockaddr saddr; @@ -651,23 +635,17 @@ static void bond_do_fail_over_mac(struct bonding *bond, switch (bond->params.fail_over_mac) { case BOND_FOM_ACTIVE: - if (new_active) { - write_unlock_bh(&bond->curr_slave_lock); + if (new_active) bond_set_dev_addr(bond->dev, new_active->dev); - write_lock_bh(&bond->curr_slave_lock); - } break; case BOND_FOM_FOLLOW: - /* - * if new_active && old_active, swap them + /* if new_active && old_active, swap them * if just old_active, do nothing (going to no active slave) * if just new_active, set new_active to bond's MAC */ if (!new_active) return; - write_unlock_bh(&bond->curr_slave_lock); - if (old_active) { ether_addr_copy(tmp_mac, new_active->dev->dev_addr); ether_addr_copy(saddr.sa_data, @@ -696,7 +674,6 @@ static void bond_do_fail_over_mac(struct bonding *bond, netdev_err(bond->dev, "Error %d setting MAC of slave %s\n", -rv, new_active->dev->name); out: - write_lock_bh(&bond->curr_slave_lock); break; default: netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n", @@ -708,8 +685,8 @@ out: static bool bond_should_change_active(struct bonding *bond) { - struct slave *prim = bond->primary_slave; - struct slave *curr = bond_deref_active_protected(bond); + struct slave *prim = rtnl_dereference(bond->primary_slave); + struct slave *curr = rtnl_dereference(bond->curr_active_slave); if (!prim || !curr || curr->link != BOND_LINK_UP) return true; @@ -732,13 +709,14 @@ static bool bond_should_change_active(struct bonding *bond) */ static struct slave *bond_find_best_slave(struct bonding *bond) { - struct slave *slave, *bestslave = NULL; + struct slave *slave, *bestslave = NULL, *primary; struct list_head *iter; int mintime = bond->params.updelay; - if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP && + primary = rtnl_dereference(bond->primary_slave); + if (primary && primary->link == BOND_LINK_UP && bond_should_change_active(bond)) - return bond->primary_slave; + return primary; bond_for_each_slave(bond, slave, iter) { if (slave->link == BOND_LINK_UP) @@ -784,15 +762,15 @@ static bool bond_should_notify_peers(struct bonding *bond) * because it is apparently the best available slave we have, even though its * updelay hasn't timed out yet. * - * If new_active is not NULL, caller must hold curr_slave_lock for write_bh. + * Caller must hold RTNL. */ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) { struct slave *old_active; - old_active = rcu_dereference_protected(bond->curr_active_slave, - !new_active || - lockdep_is_held(&bond->curr_slave_lock)); + ASSERT_RTNL(); + + old_active = rtnl_dereference(bond->curr_active_slave); if (old_active == new_active) return; @@ -860,21 +838,18 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) bond_should_notify_peers(bond); } - write_unlock_bh(&bond->curr_slave_lock); - call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); if (should_notify_peers) call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); - - write_lock_bh(&bond->curr_slave_lock); } } /* resend IGMP joins since active slave has changed or * all were sent on curr_active_slave. 
* resend only if bond is brought up with the affected - * bonding modes and the retransmission is enabled */ + * bonding modes and the retransmission is enabled + */ if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && ((bond_uses_primary(bond) && new_active) || BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) { @@ -892,15 +867,17 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) * - The primary_slave has got its link back. * - A slave has got its link back and there's no old curr_active_slave. * - * Caller must hold curr_slave_lock for write_bh. + * Caller must hold RTNL. */ void bond_select_active_slave(struct bonding *bond) { struct slave *best_slave; int rv; + ASSERT_RTNL(); + best_slave = bond_find_best_slave(bond); - if (best_slave != bond_deref_active_protected(bond)) { + if (best_slave != rtnl_dereference(bond->curr_active_slave)) { bond_change_active_slave(bond, best_slave); rv = bond_set_carrier(bond); if (!rv) @@ -1240,8 +1217,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) slave_dev->name); } - /* - * Old ifenslave binaries are no longer supported. These can + /* Old ifenslave binaries are no longer supported. These can * be identified with moderate accuracy by the state of the slave: * the current ifenslave will set the interface down prior to * enslaving it; the old ifenslave will not. @@ -1313,7 +1289,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) call_netdevice_notifiers(NETDEV_JOIN, slave_dev); /* If this is the first slave, then we need to set the master's hardware - * address to be the same as the slave's. */ + * address to be the same as the slave's. + */ if (!bond_has_slaves(bond) && bond->dev->addr_assign_type == NET_ADDR_RANDOM) bond_set_dev_addr(bond->dev, slave_dev); @@ -1326,8 +1303,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) new_slave->bond = bond; new_slave->dev = slave_dev; - /* - * Set the new_slave's queue_id to be zero. Queue ID mapping + /* Set the new_slave's queue_id to be zero. Queue ID mapping * is set via sysfs or module option if desired. */ new_slave->queue_id = 0; @@ -1340,8 +1316,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_free; } - /* - * Save slave's original ("permanent") mac address for modes + /* Save slave's original ("permanent") mac address for modes * that need it, and for restoring it upon release, and then * set it to the master's address */ @@ -1349,8 +1324,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (!bond->params.fail_over_mac || BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { - /* - * Set slave to master's mac address. The application already + /* Set slave to master's mac address. The application already * set the master's mac address to that of the first slave */ memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); @@ -1436,8 +1410,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) link_reporting = bond_check_dev_link(bond, slave_dev, 1); if ((link_reporting == -1) && !bond->params.arp_interval) { - /* - * miimon is set but a bonded network driver + /* miimon is set but a bonded network driver * does not support ETHTOOL/MII and * arp_interval is not set. 
Note: if * use_carrier is enabled, we will never go @@ -1482,7 +1455,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (bond_uses_primary(bond) && bond->params.primary[0]) { /* if there is a primary slave, remember it */ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { - bond->primary_slave = new_slave; + rcu_assign_pointer(bond->primary_slave, new_slave); bond->force_primary = true; } } @@ -1570,9 +1543,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (bond_uses_primary(bond)) { block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } @@ -1596,16 +1567,16 @@ err_detach: bond_hw_addr_flush(bond_dev, slave_dev); vlan_vids_del_by_dev(slave_dev, bond_dev); - if (bond->primary_slave == new_slave) - bond->primary_slave = NULL; + if (rcu_access_pointer(bond->primary_slave) == new_slave) + RCU_INIT_POINTER(bond->primary_slave, NULL); if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_change_active_slave(bond, NULL); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } + /* either primary_slave or curr_active_slave might've changed */ + synchronize_rcu(); slave_disable_netpoll(new_slave); err_close: @@ -1639,10 +1610,9 @@ err_undo_flags: return res; } -/* - * Try to release the slave device <slave> from the bond device <master> +/* Try to release the slave device <slave> from the bond device <master> * It is legal to access curr_active_slave without a lock because all the function - * is write-locked. If "all" is true it means that the function is being called + * is RTNL-locked. If "all" is true it means that the function is being called * while destroying a bond interface and all slaves are being released. * * The rules for slave state should be: @@ -1687,14 +1657,10 @@ static int __bond_release_one(struct net_device *bond_dev, * for this slave anymore. */ netdev_rx_handler_unregister(slave_dev); - write_lock_bh(&bond->lock); - /* Inform AD package of unbinding of slave. */ if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_unbind_slave(slave); - write_unlock_bh(&bond->lock); - netdev_info(bond_dev, "Releasing %s interface %s\n", bond_is_active_slave(slave) ? "active" : "backup", slave_dev->name); @@ -1712,14 +1678,11 @@ static int __bond_release_one(struct net_device *bond_dev, bond_dev->name, slave_dev->name); } - if (bond->primary_slave == slave) - bond->primary_slave = NULL; + if (rtnl_dereference(bond->primary_slave) == slave) + RCU_INIT_POINTER(bond->primary_slave, NULL); - if (oldcurrent == slave) { - write_lock_bh(&bond->curr_slave_lock); + if (oldcurrent == slave) bond_change_active_slave(bond, NULL); - write_unlock_bh(&bond->curr_slave_lock); - } if (bond_is_lb(bond)) { /* Must be called only after the slave has been @@ -1733,16 +1696,11 @@ static int __bond_release_one(struct net_device *bond_dev, if (all) { RCU_INIT_POINTER(bond->curr_active_slave, NULL); } else if (oldcurrent == slave) { - /* - * Note that we hold RTNL over this sequence, so there + /* Note that we hold RTNL over this sequence, so there * is no concern that another slave add/remove event * will interfere. 
*/ - write_lock_bh(&bond->curr_slave_lock); - bond_select_active_slave(bond); - - write_unlock_bh(&bond->curr_slave_lock); } if (!bond_has_slaves(bond)) { @@ -1765,10 +1723,9 @@ static int __bond_release_one(struct net_device *bond_dev, netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n", slave_dev->name, bond_dev->name); - /* must do this from outside any spinlocks */ vlan_vids_del_by_dev(slave_dev, bond_dev); - /* If the mode uses primary, then this cases was handled above by + /* If the mode uses primary, then this case was handled above by * bond_change_active_slave(..., NULL) */ if (!bond_uses_primary(bond)) { @@ -1808,7 +1765,7 @@ static int __bond_release_one(struct net_device *bond_dev, bond_free_slave(slave); - return 0; /* deletion OK */ + return 0; } /* A wrapper used because of ndo_del_link */ @@ -1817,10 +1774,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) return __bond_release_one(bond_dev, slave_dev, false); } -/* -* First release a slave and then destroy the bond if no more slaves are left. -* Must be under rtnl_lock when this function is called. -*/ +/* First release a slave and then destroy the bond if no more slaves are left. + * Must be under rtnl_lock when this function is called. + */ static int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -1843,7 +1799,6 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) info->bond_mode = BOND_MODE(bond); info->miimon = bond->params.miimon; - info->num_slaves = bond->slave_cnt; return 0; @@ -1906,9 +1861,7 @@ static int bond_miimon_inspect(struct bonding *bond) /*FALLTHRU*/ case BOND_LINK_FAIL: if (link_state) { - /* - * recovered before downdelay expired - */ + /* recovered before downdelay expired */ slave->link = BOND_LINK_UP; slave->last_link_up = jiffies; netdev_info(bond->dev, "link status up again after %d ms for interface %s\n", @@ -1974,7 +1927,7 @@ static int bond_miimon_inspect(struct bonding *bond) static void bond_miimon_commit(struct bonding *bond) { struct list_head *iter; - struct slave *slave; + struct slave *slave, *primary; bond_for_each_slave(bond, slave, iter) { switch (slave->new_link) { @@ -1985,13 +1938,14 @@ static void bond_miimon_commit(struct bonding *bond) slave->link = BOND_LINK_UP; slave->last_link_up = jiffies; + primary = rtnl_dereference(bond->primary_slave); if (BOND_MODE(bond) == BOND_MODE_8023AD) { /* prevent it from being the active one */ bond_set_backup_slave(slave); } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* make it immediately active */ bond_set_active_slave(slave); - } else if (slave != bond->primary_slave) { + } else if (slave != primary) { /* prevent it from being the active one */ bond_set_backup_slave(slave); } @@ -2009,8 +1963,7 @@ static void bond_miimon_commit(struct bonding *bond) bond_alb_handle_link_change(bond, slave, BOND_LINK_UP); - if (!bond->curr_active_slave || - (slave == bond->primary_slave)) + if (!bond->curr_active_slave || slave == primary) goto do_failover; continue; @@ -2051,19 +2004,15 @@ static void bond_miimon_commit(struct bonding *bond) } do_failover: - ASSERT_RTNL(); block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } bond_set_carrier(bond); } -/* - * bond_mii_monitor +/* bond_mii_monitor * * Really a wrapper that splits the mii monitor into two phases: an * inspection, then (if inspection 
indicates something needs to be done) @@ -2135,8 +2084,7 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip) return ret; } -/* - * We go to the (large) trouble of VLAN tagging ARP frames because +/* We go to the (large) trouble of VLAN tagging ARP frames because * switches in VLAN mode (especially if ports are configured as * "native" to a VLAN) might not pass non-tagged frames. */ @@ -2363,8 +2311,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, curr_active_slave = rcu_dereference(bond->curr_active_slave); - /* - * Backup slaves won't see the ARP reply, but do come through + /* Backup slaves won't see the ARP reply, but do come through * here for each ARP probe (so we swap the sip/tip to validate * the probe). In a "redundant switch, common router" type of * configuration, the ARP probe will (hopefully) travel from @@ -2404,8 +2351,7 @@ static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, last_act + mod * delta_in_ticks + delta_in_ticks/2); } -/* - * this function is called regularly to monitor each slave's link +/* This function is called regularly to monitor each slave's link * ensuring that traffic is being sent and received when arp monitoring * is used in load-balancing mode. if the adapter has been dormant, then an * arp is transmitted to generate traffic. see activebackup_arp_monitor for @@ -2501,15 +2447,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) if (slave_state_changed) { bond_slave_state_change(bond); } else if (do_failover) { - /* the bond_select_active_slave must hold RTNL - * and curr_slave_lock for write. - */ block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); - bond_select_active_slave(bond); - - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } rtnl_unlock(); @@ -2521,13 +2460,12 @@ re_arm: msecs_to_jiffies(bond->params.arp_interval)); } -/* - * Called to inspect slaves for active-backup mode ARP monitor link state +/* Called to inspect slaves for active-backup mode ARP monitor link state * changes. Sets new_link in slaves to specify what action should take * place for the slave. Returns 0 if no changes are found, >0 if changes * to link states must be committed. * - * Called with rcu_read_lock hold. + * Called with rcu_read_lock held. */ static int bond_ab_arp_inspect(struct bonding *bond) { @@ -2548,16 +2486,14 @@ static int bond_ab_arp_inspect(struct bonding *bond) continue; } - /* - * Give slaves 2*delta after being enslaved or made + /* Give slaves 2*delta after being enslaved or made * active. This avoids bouncing, as the last receive * times need a full ARP monitor cycle to be updated. */ if (bond_time_in_interval(bond, slave->last_link_up, 2)) continue; - /* - * Backup slave is down if: + /* Backup slave is down if: * - No current_arp_slave AND * - more than 3*delta since last receive AND * - the bond has an IP address @@ -2576,8 +2512,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) commit++; } - /* - * Active slave is down if: + /* Active slave is down if: * - more than 2*delta since transmitting OR * - (more than 2*delta since receive AND * the bond has an IP address) @@ -2594,8 +2529,7 @@ static int bond_ab_arp_inspect(struct bonding *bond) return commit; } -/* - * Called to commit link state changes noted by inspection step of +/* Called to commit link state changes noted by inspection step of * active-backup mode ARP monitor. * * Called with RTNL hold. 
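The pattern running through these bond_main.c hunks is a single locking rule: paths that change curr_active_slave or primary_slave (failover, the *_commit functions, option setters) run under RTNL and use rtnl_dereference(), while hot paths read the same pointers under a plain rcu_read_lock(). A minimal sketch of that contract, assuming the structures introduced by this patch; the bond_example_*() helpers are invented for illustration and are not part of the series:

#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include "bonding.h"

/* Writer side: RTNL has replaced curr_slave_lock, so a failover is
 * simply RTNL plus the netpoll guard around bond_select_active_slave().
 */
static void bond_example_failover(struct bonding *bond)
{
	ASSERT_RTNL();

	block_netpoll_tx();
	bond_select_active_slave(bond);	/* may rcu_assign_pointer() a new active */
	unblock_netpoll_tx();
}

/* Reader side: no rwlock nesting, just an RCU read-side section. */
static bool bond_example_is_active(struct bonding *bond, struct slave *slave)
{
	struct slave *curr;
	bool ret;

	rcu_read_lock();
	curr = rcu_dereference(bond->curr_active_slave);
	ret = (curr == slave);
	rcu_read_unlock();

	return ret;
}

When only pointer identity matters and the pointee is never dereferenced, rcu_access_pointer() suffices, which is why bond_do_alb_xmit() above compares tx_slave against rcu_access_pointer(bond->curr_active_slave) without a full read-side dereference.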
@@ -2631,7 +2565,7 @@ static void bond_ab_arp_commit(struct bonding *bond) slave->dev->name); if (!rtnl_dereference(bond->curr_active_slave) || - (slave == bond->primary_slave)) + slave == rtnl_dereference(bond->primary_slave)) goto do_failover; } @@ -2663,21 +2597,17 @@ static void bond_ab_arp_commit(struct bonding *bond) } do_failover: - ASSERT_RTNL(); block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); } bond_set_carrier(bond); } -/* - * Send ARP probes for active-backup mode ARP monitor. +/* Send ARP probes for active-backup mode ARP monitor. * - * Called with rcu_read_lock hold. + * Called with rcu_read_lock held. */ static bool bond_ab_arp_probe(struct bonding *bond) { @@ -2817,9 +2747,7 @@ re_arm: /*-------------------------- netdev event handling --------------------------*/ -/* - * Change device name - */ +/* Change device name */ static int bond_event_changename(struct bonding *bond) { bond_remove_proc_entry(bond); @@ -2858,7 +2786,7 @@ static int bond_master_netdev_event(unsigned long event, static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev) { - struct slave *slave = bond_slave_get_rtnl(slave_dev); + struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary; struct bonding *bond; struct net_device *bond_dev; u32 old_speed; @@ -2872,6 +2800,7 @@ static int bond_slave_netdev_event(unsigned long event, return NOTIFY_DONE; bond_dev = slave->bond->dev; bond = slave->bond; + primary = rtnl_dereference(bond->primary_slave); switch (event) { case NETDEV_UNREGISTER: @@ -2895,13 +2824,9 @@ static int bond_slave_netdev_event(unsigned long event, } break; case NETDEV_DOWN: - /* - * ... Or is it this? - */ break; case NETDEV_CHANGEMTU: - /* - * TODO: Should slaves be allowed to + /* TODO: Should slaves be allowed to * independently alter their MTU? For * an active-backup bond, slaves need * not be the same type of device, so @@ -2919,23 +2844,21 @@ static int bond_slave_netdev_event(unsigned long event, !bond->params.primary[0]) break; - if (slave == bond->primary_slave) { + if (slave == primary) { /* slave's name changed - he's no longer primary */ - bond->primary_slave = NULL; + RCU_INIT_POINTER(bond->primary_slave, NULL); } else if (!strcmp(slave_dev->name, bond->params.primary)) { /* we have a new primary slave */ - bond->primary_slave = slave; + rcu_assign_pointer(bond->primary_slave, slave); } else { /* we didn't change primary - exit */ break; } netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n", - bond->primary_slave ? slave_dev->name : "none"); + primary ? slave_dev->name : "none"); block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); break; case NETDEV_FEAT_CHANGE: @@ -2952,8 +2875,7 @@ static int bond_slave_netdev_event(unsigned long event, return NOTIFY_DONE; } -/* - * bond_netdev_event: handle netdev notifier chain events. +/* bond_netdev_event: handle netdev notifier chain events. * * This function receives events for the netdev chain. 
The caller (an * ioctl handler calling blocking_notifier_call_chain) holds the necessary @@ -3099,9 +3021,7 @@ static int bond_open(struct net_device *bond_dev) struct slave *slave; /* reset slave->backup and slave->inactive */ - read_lock(&bond->lock); if (bond_has_slaves(bond)) { - read_lock(&bond->curr_slave_lock); bond_for_each_slave(bond, slave, iter) { if (bond_uses_primary(bond) && slave != rcu_access_pointer(bond->curr_active_slave)) { @@ -3112,9 +3032,7 @@ static int bond_open(struct net_device *bond_dev) BOND_SLAVE_NOTIFY_NOW); } } - read_unlock(&bond->curr_slave_lock); } - read_unlock(&bond->lock); bond_work_init_all(bond); @@ -3169,7 +3087,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, memset(stats, 0, sizeof(*stats)); - read_lock_bh(&bond->lock); bond_for_each_slave(bond, slave, iter) { const struct rtnl_link_stats64 *sstats = dev_get_stats(slave->dev, &temp); @@ -3200,7 +3117,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors; stats->tx_window_errors += sstats->tx_window_errors; } - read_unlock_bh(&bond->lock); return stats; } @@ -3229,24 +3145,17 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd mii->phy_id = 0; /* Fall Through */ case SIOCGMIIREG: - /* - * We do this again just in case we were called by SIOCGMIIREG + /* We do this again just in case we were called by SIOCGMIIREG * instead of SIOCGMIIPHY. */ mii = if_mii(ifr); if (!mii) return -EINVAL; - if (mii->reg_num == 1) { mii->val_out = 0; - read_lock(&bond->lock); - read_lock(&bond->curr_slave_lock); if (netif_carrier_ok(bond->dev)) mii->val_out = BMSR_LSTATUS; - - read_unlock(&bond->curr_slave_lock); - read_unlock(&bond->lock); } return 0; @@ -3277,7 +3186,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return res; default: - /* Go on */ break; } @@ -3339,7 +3247,6 @@ static void bond_set_rx_mode(struct net_device *bond_dev) struct list_head *iter; struct slave *slave; - rcu_read_lock(); if (bond_uses_primary(bond)) { slave = rcu_dereference(bond->curr_active_slave); @@ -3377,8 +3284,7 @@ static int bond_neigh_init(struct neighbour *n) if (ret) return ret; - /* - * Assign slave's neigh_cleanup to neighbour in case cleanup is called + /* Assign slave's neigh_cleanup to neighbour in case cleanup is called * after the last slave has been detached. Assumes that all slaves * utilize the same neigh_cleanup (true at this writing as only user * is ipoib). @@ -3391,8 +3297,7 @@ static int bond_neigh_init(struct neighbour *n) return parms.neigh_setup(n); } -/* - * The bonding ndo_neigh_setup is called at init time beofre any +/* The bonding ndo_neigh_setup is called at init time before any * slave exists. So we must declare proxy setup function which will * be used at run time to resolve the actual slave neigh param setup. * @@ -3410,9 +3315,7 @@ static int bond_neigh_setup(struct net_device *dev, return 0; } -/* - * Change the MTU of all of a master's slaves to match the master - */ +/* Change the MTU of all of a master's slaves to match the master */ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) { struct bonding *bond = netdev_priv(bond_dev); @@ -3422,21 +3325,6 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu); - /* Can't hold bond->lock with bh disabled here since - * some base drivers panic. 
On the other hand we can't - * hold bond->lock without bh disabled because we'll - * deadlock. The only solution is to rely on the fact - * that we're under rtnl_lock here, and the slaves - * list won't change. This doesn't solve the problem - * of setting the slave's MTU while it is - * transmitting, but the assumption is that the base - * driver can handle that. - * - * TODO: figure out a way to safely iterate the slaves - * list, but without holding a lock around the actual - * call to the base driver. - */ - bond_for_each_slave(bond, slave, iter) { netdev_dbg(bond_dev, "s %p c_m %p\n", slave, slave->dev->netdev_ops->ndo_change_mtu); @@ -3480,8 +3368,7 @@ unwind: return res; } -/* - * Change HW address +/* Change HW address * * Note that many devices must be down to change the HW address, and * downing the master releases all slaves. We can make bonds full of @@ -3511,21 +3398,6 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) if (!is_valid_ether_addr(sa->sa_data)) return -EADDRNOTAVAIL; - /* Can't hold bond->lock with bh disabled here since - * some base drivers panic. On the other hand we can't - * hold bond->lock without bh disabled because we'll - * deadlock. The only solution is to rely on the fact - * that we're under rtnl_lock here, and the slaves - * list won't change. This doesn't solve the problem - * of setting the slave's hw address while it is - * transmitting, but the assumption is that the base - * driver can handle that. - * - * TODO: figure out a way to safely iterate the slaves - * list, but without holding a lock around the actual - * call to the base driver. - */ - bond_for_each_slave(bond, slave, iter) { netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name); res = dev_set_mac_address(slave->dev, addr); @@ -3654,7 +3526,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev */ if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) { slave = rcu_dereference(bond->curr_active_slave); - if (slave && bond_slave_can_tx(slave)) + if (slave) bond_dev_queue_xmit(bond, skb, slave->dev); else bond_xmit_slave_id(bond, skb, 0); @@ -3672,8 +3544,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev return NETDEV_TX_OK; } -/* - * in active-backup mode, we know that bond->curr_active_slave is always valid if +/* In active-backup mode, we know that bond->curr_active_slave is always valid if * the bond has a usable interface. 
*/ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev) @@ -3726,7 +3597,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) bond_dev->name, __func__); continue; } - /* bond_dev_queue_xmit always returns 0 */ bond_dev_queue_xmit(bond, skb2, slave->dev); } } @@ -3740,9 +3610,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) /*------------------------- Device initialization ---------------------------*/ -/* - * Lookup the slave that corresponds to a qid - */ +/* Lookup the slave that corresponds to a qid */ static inline int bond_slave_override(struct bonding *bond, struct sk_buff *skb) { @@ -3771,17 +3639,14 @@ static inline int bond_slave_override(struct bonding *bond, static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { - /* - * This helper function exists to help dev_pick_tx get the correct + /* This helper function exists to help dev_pick_tx get the correct * destination queue. Using a helper function skips a call to * skb_tx_hash and will put the skbs in the queue we expect on their * way down to the bonding driver. */ u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; - /* - * Save the original txq to restore before passing to the driver - */ + /* Save the original txq to restore before passing to the driver */ qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; if (unlikely(txq >= dev->real_num_tx_queues)) { @@ -3829,8 +3694,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) struct bonding *bond = netdev_priv(dev); netdev_tx_t ret = NETDEV_TX_OK; - /* - * If we risk deadlock from transmitting this in the + /* If we risk deadlock from transmitting this in the * netpoll path, tell netpoll to queue the frame for later tx */ if (unlikely(is_netpoll_tx_blocked(dev))) @@ -3862,7 +3726,6 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev, * the true receive or transmit bandwidth (not all modes are symmetric) * this is an accurate maximum. */ - read_lock(&bond->lock); bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { if (slave->speed != SPEED_UNKNOWN) @@ -3873,7 +3736,6 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev, } } ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN); - read_unlock(&bond->lock); return 0; } @@ -3935,9 +3797,7 @@ void bond_setup(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - /* initialize rwlocks */ - rwlock_init(&bond->lock); - rwlock_init(&bond->curr_slave_lock); + spin_lock_init(&bond->mode_lock); bond->params = bonding_defaults; /* Initialize pointers */ @@ -3958,8 +3818,7 @@ void bond_setup(struct net_device *bond_dev) bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT; bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); - /* don't acquire bond device's netif_tx_lock when - * transmitting */ + /* don't acquire bond device's netif_tx_lock when transmitting */ bond_dev->features |= NETIF_F_LLTX; /* By default, we declare the bond to be fully @@ -3982,10 +3841,9 @@ void bond_setup(struct net_device *bond_dev) bond_dev->features |= bond_dev->hw_features; } -/* -* Destroy a bonding device. -* Must be under rtnl_lock when this function is called. -*/ +/* Destroy a bonding device. + * Must be under rtnl_lock when this function is called. 
+ */ static void bond_uninit(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); @@ -4013,9 +3871,7 @@ static int bond_check_params(struct bond_params *params) const struct bond_opt_value *valptr; int arp_all_targets_value; - /* - * Convert string parameters. - */ + /* Convert string parameters. */ if (mode) { bond_opt_initstr(&newval, mode); valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval); @@ -4192,9 +4048,9 @@ static int bond_check_params(struct bond_params *params) for (arp_ip_count = 0, i = 0; (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { - /* not complete check, but should be good enough to - catch mistakes */ __be32 ip; + + /* not a complete check, but good enough to catch mistakes */ if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || !bond_is_ip_target_ok(ip)) { pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", @@ -4377,26 +4233,14 @@ static void bond_set_lockdep_class(struct net_device *dev) dev->qdisc_tx_busylock = &bonding_tx_busylock_key; } -/* - * Called from registration process - */ +/* Called from registration process */ static int bond_init(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); - struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); netdev_dbg(bond_dev, "Begin bond_init\n"); - /* - * Initialize locks that may be required during - * en/deslave operations. All of the bond_open work - * (of which this is part) should really be moved to - * a phase prior to dev_open - */ - spin_lock_init(&(bond_info->tx_hashtbl_lock)); - spin_lock_init(&(bond_info->rx_hashtbl_lock)); - bond->wq = create_singlethread_workqueue(bond_dev->name); if (!bond->wq) return -ENOMEM; @@ -4543,9 +4387,7 @@ static void __exit bonding_exit(void) unregister_pernet_subsys(&bond_net_ops); #ifdef CONFIG_NET_POLL_CONTROLLER - /* - * Make sure we don't have an imbalance on our netpoll blocking - */ + /* Make sure we don't have an imbalance on our netpoll blocking */ WARN_ON(atomic_read(&netpoll_block_tx)); #endif } diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index d163e112f04c..c13d83e15ace 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -96,6 +96,10 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = { [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED }, }; +static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = { + [IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 }, +}; + static int bond_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { @@ -107,6 +111,33 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[]) return 0; } +static int bond_slave_changelink(struct net_device *bond_dev, + struct net_device *slave_dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct bonding *bond = netdev_priv(bond_dev); + struct bond_opt_value newval; + int err; + + if (!data) + return 0; + + if (data[IFLA_BOND_SLAVE_QUEUE_ID]) { + u16 queue_id = nla_get_u16(data[IFLA_BOND_SLAVE_QUEUE_ID]); + char queue_id_str[IFNAMSIZ + 7]; + + /* queue_id option setting expects slave_name:queue_id */ + snprintf(queue_id_str, sizeof(queue_id_str), "%s:%u\n", + slave_dev->name, queue_id); + bond_opt_initstr(&newval, queue_id_str); + err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval); + if (err) + return err; + } + + return 0; +} + static int 
bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], struct nlattr *data[]) { @@ -412,6 +443,7 @@ static int bond_fill_info(struct sk_buff *skb, unsigned int packets_per_slave; int ifindex, i, targets_added; struct nlattr *targets; + struct slave *primary; if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond))) goto nla_put_failure; @@ -461,9 +493,9 @@ static int bond_fill_info(struct sk_buff *skb, bond->params.arp_all_targets)) goto nla_put_failure; - if (bond->primary_slave && - nla_put_u32(skb, IFLA_BOND_PRIMARY, - bond->primary_slave->dev->ifindex)) + primary = rtnl_dereference(bond->primary_slave); + if (primary && + nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT, @@ -562,6 +594,9 @@ struct rtnl_link_ops bond_link_ops __read_mostly = { .get_num_tx_queues = bond_get_num_tx_queues, .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number as for TX queues */ + .slave_maxtype = IFLA_BOND_SLAVE_MAX, + .slave_policy = bond_slave_policy, + .slave_changelink = bond_slave_changelink, .get_slave_size = bond_get_slave_size, .fill_slave_info = bond_fill_slave_info, }; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index dc73463c2c23..b62697f4a3de 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -625,6 +625,8 @@ int __bond_opt_set(struct bonding *bond, out: if (ret) bond_opt_error_interpret(bond, opt, ret, val); + else + call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev); return ret; } @@ -732,15 +734,13 @@ static int bond_option_active_slave_set(struct bonding *bond, } block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); - /* check to see if we are clearing active */ if (!slave_dev) { netdev_info(bond->dev, "Clearing current active slave\n"); RCU_INIT_POINTER(bond->curr_active_slave, NULL); bond_select_active_slave(bond); } else { - struct slave *old_active = bond_deref_active_protected(bond); + struct slave *old_active = rtnl_dereference(bond->curr_active_slave); struct slave *new_active = bond_slave_get_rtnl(slave_dev); BUG_ON(!new_active); @@ -763,8 +763,6 @@ static int bond_option_active_slave_set(struct bonding *bond, } } } - - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); return ret; @@ -953,14 +951,7 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target) static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target) { - int ret; - - /* not to race with bond_arp_rcv */ - write_lock_bh(&bond->lock); - ret = _bond_option_arp_ip_target_add(bond, target); - write_unlock_bh(&bond->lock); - - return ret; + return _bond_option_arp_ip_target_add(bond, target); } static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) @@ -989,9 +980,6 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) netdev_info(bond->dev, "Removing ARP target %pI4\n", &target); - /* not to race with bond_arp_rcv */ - write_lock_bh(&bond->lock); - bond_for_each_slave(bond, slave, iter) { targets_rx = slave->target_last_arp_rx; for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++) @@ -1002,8 +990,6 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) targets[i] = targets[i+1]; targets[i] = 0; - write_unlock_bh(&bond->lock); - return 0; } @@ -1011,11 +997,8 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond) { int i; - /* not to race with bond_arp_rcv */ - 
write_lock_bh(&bond->lock); for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) _bond_options_arp_ip_target_set(bond, i, 0, 0); - write_unlock_bh(&bond->lock); } static int bond_option_arp_ip_targets_set(struct bonding *bond, @@ -1079,8 +1062,6 @@ static int bond_option_primary_set(struct bonding *bond, struct slave *slave; block_netpoll_tx(); - read_lock(&bond->lock); - write_lock_bh(&bond->curr_slave_lock); p = strchr(primary, '\n'); if (p) @@ -1088,7 +1069,7 @@ static int bond_option_primary_set(struct bonding *bond, /* check to see if we are clearing primary */ if (!strlen(primary)) { netdev_info(bond->dev, "Setting primary slave to None\n"); - bond->primary_slave = NULL; + RCU_INIT_POINTER(bond->primary_slave, NULL); memset(bond->params.primary, 0, sizeof(bond->params.primary)); bond_select_active_slave(bond); goto out; @@ -1098,16 +1079,16 @@ static int bond_option_primary_set(struct bonding *bond, if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) { netdev_info(bond->dev, "Setting %s as primary slave\n", slave->dev->name); - bond->primary_slave = slave; + rcu_assign_pointer(bond->primary_slave, slave); strcpy(bond->params.primary, slave->dev->name); bond_select_active_slave(bond); goto out; } } - if (bond->primary_slave) { + if (rtnl_dereference(bond->primary_slave)) { netdev_info(bond->dev, "Setting primary slave to None\n"); - bond->primary_slave = NULL; + RCU_INIT_POINTER(bond->primary_slave, NULL); bond_select_active_slave(bond); } strncpy(bond->params.primary, primary, IFNAMSIZ); @@ -1117,8 +1098,6 @@ static int bond_option_primary_set(struct bonding *bond, primary, bond->dev->name); out: - write_unlock_bh(&bond->curr_slave_lock); - read_unlock(&bond->lock); unblock_netpoll_tx(); return 0; @@ -1132,9 +1111,7 @@ static int bond_option_primary_reselect_set(struct bonding *bond, bond->params.primary_reselect = newval->value; block_netpoll_tx(); - write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); - write_unlock_bh(&bond->curr_slave_lock); unblock_netpoll_tx(); return 0; diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index de62c0385dfb..bb09d0442aa8 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c @@ -7,21 +7,18 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) - __acquires(&bond->lock) { struct bonding *bond = seq->private; struct list_head *iter; struct slave *slave; loff_t off = 0; - /* make sure the bond won't be taken away */ rcu_read_lock(); - read_lock(&bond->lock); if (*pos == 0) return SEQ_START_TOKEN; - bond_for_each_slave(bond, slave, iter) + bond_for_each_slave_rcu(bond, slave, iter) if (++off == *pos) return slave; @@ -37,12 +34,9 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++*pos; if (v == SEQ_START_TOKEN) - return bond_first_slave(bond); + return bond_first_slave_rcu(bond); - if (bond_is_last_slave(bond, v)) - return NULL; - - bond_for_each_slave(bond, slave, iter) { + bond_for_each_slave_rcu(bond, slave, iter) { if (found) return slave; if (slave == v) @@ -53,12 +47,8 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) } static void bond_info_seq_stop(struct seq_file *seq, void *v) - __releases(&bond->lock) __releases(RCU) { - struct bonding *bond = seq->private; - - read_unlock(&bond->lock); rcu_read_unlock(); } @@ -66,7 +56,7 @@ static void bond_info_show_master(struct seq_file *seq) { struct bonding *bond = seq->private; const struct bond_opt_value *optval; - struct slave *curr; 
+ struct slave *curr, *primary; int i; curr = rcu_dereference(bond->curr_active_slave); @@ -92,10 +82,10 @@ static void bond_info_show_master(struct seq_file *seq) } if (bond_uses_primary(bond)) { + primary = rcu_dereference(bond->primary_slave); seq_printf(seq, "Primary Slave: %s", - (bond->primary_slave) ? - bond->primary_slave->dev->name : "None"); - if (bond->primary_slave) { + primary ? primary->dev->name : "None"); + if (primary) { optval = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT, bond->params.primary_reselect); seq_printf(seq, " (primary_reselect %s)", diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 98db8edd9c75..8ffbafd500fd 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -91,7 +91,6 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna * creates and deletes entire bonds. * * The class parameter is ignored. - * */ static ssize_t bonding_store_bonds(struct class *cls, struct class_attribute *attr, @@ -425,11 +424,15 @@ static ssize_t bonding_show_primary(struct device *d, struct device_attribute *attr, char *buf) { - int count = 0; struct bonding *bond = to_bond(d); + struct slave *primary; + int count = 0; - if (bond->primary_slave) - count = sprintf(buf, "%s\n", bond->primary_slave->dev->name); + rcu_read_lock(); + primary = rcu_dereference(bond->primary_slave); + if (primary) + count = sprintf(buf, "%s\n", primary->dev->name); + rcu_read_unlock(); return count; } diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index aace510d08d1..6140bf0264a4 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -83,7 +83,7 @@ * @pos: current slave * @iter: list_head * iterator * - * Caller must hold bond->lock + * Caller must hold RTNL */ #define bond_for_each_slave(bond, pos, iter) \ netdev_for_each_lower_private((bond)->dev, pos, iter) @@ -184,24 +184,25 @@ struct slave { /* * Here are the locking policies for the two bonding locks: - * - * 1) Get bond->lock when reading/writing slave list. - * 2) Get bond->curr_slave_lock when reading/writing bond->curr_active_slave. - * (It is unnecessary when the write-lock is put with bond->lock.) - * 3) When we lock with bond->curr_slave_lock, we must lock with bond->lock - * beforehand. + * Get rcu_read_lock when reading or RTNL when writing slave list. */ struct bonding { struct net_device *dev; /* first - useful for panic debug */ struct slave __rcu *curr_active_slave; struct slave __rcu *current_arp_slave; - struct slave *primary_slave; + struct slave __rcu *primary_slave; bool force_primary; s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ int (*recv_probe)(const struct sk_buff *, struct bonding *, struct slave *); - rwlock_t lock; - rwlock_t curr_slave_lock; + /* mode_lock is used for mode-specific locking needs, currently used by: + * 3ad mode (4) - protect against running bond_3ad_unbind_slave() and + * bond_3ad_state_machine_handler() concurrently and also + * the access to the state machine shared variables. 
+ * TLB mode (5) - to sync the use and modifications of its hash table + * ALB mode (6) - to sync the use and modifications of its hash table + */ + spinlock_t mode_lock; u8 send_peer_notif; u8 igmp_retrans; #ifdef CONFIG_PROC_FS @@ -231,10 +232,6 @@ struct bonding { #define bond_slave_get_rtnl(dev) \ ((struct slave *) rtnl_dereference(dev->rx_handler_data)) -#define bond_deref_active_protected(bond) \ - rcu_dereference_protected(bond->curr_active_slave, \ - lockdep_is_held(&bond->curr_slave_lock)) - struct bond_vlan_tag { __be16 vlan_proto; unsigned short vlan_id; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 41688229c570..e78d6b32431d 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -143,6 +143,8 @@ source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/c_can/Kconfig" +source "drivers/net/can/m_can/Kconfig" + source "drivers/net/can/cc770/Kconfig" source "drivers/net/can/spi/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 1697f22353a9..fc9304143f44 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -17,6 +17,7 @@ obj-y += softing/ obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_C_CAN) += c_can/ +obj-$(CONFIG_CAN_M_CAN) += m_can/ obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o @@ -28,4 +29,4 @@ obj-$(CONFIG_CAN_GRCAN) += grcan.o obj-$(CONFIG_CAN_RCAR) += rcar_can.o obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG +subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile index ad1cc842170a..9fdc678b5b37 100644 --- a/drivers/net/can/c_can/Makefile +++ b/drivers/net/can/c_can/Makefile @@ -5,5 +5,3 @@ obj-$(CONFIG_CAN_C_CAN) += c_can.o obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile index 9fb8321b33eb..8657f879ae19 100644 --- a/drivers/net/can/cc770/Makefile +++ b/drivers/net/can/cc770/Makefile @@ -5,5 +5,3 @@ obj-$(CONFIG_CAN_CC770) += cc770.o obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 9f91fcba43f8..02492d241e4c 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -103,11 +103,11 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { struct can_priv *priv = netdev_priv(dev); - long rate, best_rate = 0; long best_error = 1000000000, error = 0; int best_tseg = 0, best_brp = 0, brp = 0; int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0; int spt_error = 1000, spt = 0, sampl_pt; + long rate; u64 v64; /* Use CIA recommended sample points */ @@ -152,7 +152,6 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, } best_tseg = tseg / 2; best_brp = brp; - best_rate = rate; if (error == 0) break; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 6586309329e6..60f86bd0434a 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -92,6 +92,27 @@ #define FLEXCAN_CTRL_ERR_ALL \ (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE) +/* FLEXCAN control register 2 (CTRL2) bits */ +#define FLEXCAN_CRL2_ECRWRE 
BIT(29) +#define FLEXCAN_CRL2_WRMFRZ BIT(28) +#define FLEXCAN_CRL2_RFFN(x) (((x) & 0x0f) << 24) +#define FLEXCAN_CRL2_TASD(x) (((x) & 0x1f) << 19) +#define FLEXCAN_CRL2_MRP BIT(18) +#define FLEXCAN_CRL2_RRS BIT(17) +#define FLEXCAN_CRL2_EACEN BIT(16) + +/* FLEXCAN memory error control register (MECR) bits */ +#define FLEXCAN_MECR_ECRWRDIS BIT(31) +#define FLEXCAN_MECR_HANCEI_MSK BIT(19) +#define FLEXCAN_MECR_FANCEI_MSK BIT(18) +#define FLEXCAN_MECR_CEI_MSK BIT(16) +#define FLEXCAN_MECR_HAERRIE BIT(15) +#define FLEXCAN_MECR_FAERRIE BIT(14) +#define FLEXCAN_MECR_EXTERRIE BIT(13) +#define FLEXCAN_MECR_RERRDIS BIT(9) +#define FLEXCAN_MECR_ECCDIS BIT(8) +#define FLEXCAN_MECR_NCEFAFRZ BIT(7) + /* FLEXCAN error and status register (ESR) bits */ #define FLEXCAN_ESR_TWRN_INT BIT(17) #define FLEXCAN_ESR_RWRN_INT BIT(16) @@ -163,18 +184,20 @@ * FLEXCAN hardware feature flags * * Below is some version info we got: - * SOC Version IP-Version Glitch- [TR]WRN_INT - * Filter? connected? - * MX25 FlexCAN2 03.00.00.00 no no - * MX28 FlexCAN2 03.00.04.00 yes yes - * MX35 FlexCAN2 03.00.00.00 no no - * MX53 FlexCAN2 03.00.00.00 yes no - * MX6s FlexCAN3 10.00.12.00 yes yes + * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err + * Filter? connected? detection + * MX25 FlexCAN2 03.00.00.00 no no no + * MX28 FlexCAN2 03.00.04.00 yes yes no + * MX35 FlexCAN2 03.00.00.00 no no no + * MX53 FlexCAN2 03.00.00.00 yes no no + * MX6s FlexCAN3 10.00.12.00 yes yes no + * VF610 FlexCAN3 ? no yes yes * * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. */ #define FLEXCAN_HAS_V10_FEATURES BIT(1) /* For core version >= 10 */ #define FLEXCAN_HAS_BROKEN_ERR_STATE BIT(2) /* [TR]WRN_INT not connected */ +#define FLEXCAN_HAS_MECR_FEATURES BIT(3) /* Memory error detection */ /* Structure of the message buffer */ struct flexcan_mb { @@ -205,8 +228,17 @@ struct flexcan_regs { u32 crcr; /* 0x44 */ u32 rxfgmask; /* 0x48 */ u32 rxfir; /* 0x4c */ - u32 _reserved3[12]; - struct flexcan_mb cantxfg[64]; + u32 _reserved3[12]; /* 0x50 */ + struct flexcan_mb cantxfg[64]; /* 0x80 */ + u32 _reserved4[408]; + u32 mecr; /* 0xae0 */ + u32 erriar; /* 0xae4 */ + u32 erridpr; /* 0xae8 */ + u32 errippr; /* 0xaec */ + u32 rerrar; /* 0xaf0 */ + u32 rerrdr; /* 0xaf4 */ + u32 rerrsynr; /* 0xaf8 */ + u32 errsr; /* 0xafc */ }; struct flexcan_devtype_data { @@ -236,6 +268,9 @@ static struct flexcan_devtype_data fsl_imx28_devtype_data; static struct flexcan_devtype_data fsl_imx6q_devtype_data = { .features = FLEXCAN_HAS_V10_FEATURES, }; +static struct flexcan_devtype_data fsl_vf610_devtype_data = { + .features = FLEXCAN_HAS_V10_FEATURES | FLEXCAN_HAS_MECR_FEATURES, +}; static const struct can_bittiming_const flexcan_bittiming_const = { .name = DRV_NAME, @@ -391,8 +426,9 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv) return 0; } -static int flexcan_get_berr_counter(const struct net_device *dev, - struct can_berr_counter *bec) + +static int __flexcan_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->base; @@ -404,6 +440,29 @@ static int flexcan_get_berr_counter(const struct net_device *dev, return 0; } +static int flexcan_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + const struct flexcan_priv *priv = netdev_priv(dev); + int err; + + err = clk_prepare_enable(priv->clk_ipg); + if (err) + return err; + + err = clk_prepare_enable(priv->clk_per); + if (err) + goto 
out_disable_ipg;
+
+	err = __flexcan_get_berr_counter(dev, bec);
+
+	clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
+	clk_disable_unprepare(priv->clk_ipg);
+
+	return err;
+}
+
 static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
@@ -524,7 +583,7 @@ static void do_state(struct net_device *dev,
 	struct flexcan_priv *priv = netdev_priv(dev);
 	struct can_berr_counter bec;
 
-	flexcan_get_berr_counter(dev, &bec);
+	__flexcan_get_berr_counter(dev, &bec);
 
 	switch (priv->can.state) {
 	case CAN_STATE_ERROR_ACTIVE:
@@ -823,9 +882,8 @@ static int flexcan_chip_start(struct net_device *dev)
 {
 	struct flexcan_priv *priv = netdev_priv(dev);
 	struct flexcan_regs __iomem *regs = priv->base;
-	int err;
-	u32 reg_mcr, reg_ctrl;
-	int i;
+	u32 reg_mcr, reg_ctrl, reg_crl2, reg_mecr;
+	int err, i;
 
 	/* enable module */
 	err = flexcan_chip_enable(priv);
@@ -914,6 +972,31 @@ static int flexcan_chip_start(struct net_device *dev)
 	if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
 		flexcan_write(0x0, &regs->rxfgmask);
 
+	/*
+	 * On Vybrid, disable memory error detection interrupts
+	 * and freeze mode.
+	 * This also works around errata e5295 which generates
+	 * false positive memory errors and puts the device in
+	 * freeze mode.
+	 */
+	if (priv->devtype_data->features & FLEXCAN_HAS_MECR_FEATURES) {
+		/*
+		 * Follow the protocol as described in "Detection
+		 * and Correction of Memory Errors" to write to
+		 * MECR register
+		 */
+		reg_crl2 = flexcan_read(&regs->crl2);
+		reg_crl2 |= FLEXCAN_CRL2_ECRWRE;
+		flexcan_write(reg_crl2, &regs->crl2);
+
+		reg_mecr = flexcan_read(&regs->mecr);
+		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
+		flexcan_write(reg_mecr, &regs->mecr);
+		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
+			      FLEXCAN_MECR_FANCEI_MSK);
+		flexcan_write(reg_mecr, &regs->mecr);
+	}
+
 	err = flexcan_transceiver_enable(priv);
 	if (err)
 		goto out_chip_disable;
@@ -1124,6 +1207,7 @@ static const struct of_device_id flexcan_of_match[] = {
 	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
 	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
 	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
 	{ /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, flexcan_of_match);
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
new file mode 100644
index 000000000000..fca5482c09ac
--- /dev/null
+++ b/drivers/net/can/m_can/Kconfig
@@ -0,0 +1,4 @@
+config CAN_M_CAN
+	tristate "Bosch M_CAN devices"
+	---help---
+	  Say Y here if you want support for the Bosch M_CAN controller.
diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile
new file mode 100644
index 000000000000..8bbd7f24f5be
--- /dev/null
+++ b/drivers/net/can/m_can/Makefile
@@ -0,0 +1,5 @@
+#
+#  Makefile for the Bosch M_CAN controller driver.
+#
+
+obj-$(CONFIG_CAN_M_CAN) += m_can.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
new file mode 100644
index 000000000000..10d571eaed85
--- /dev/null
+++ b/drivers/net/can/m_can/m_can.c
@@ -0,0 +1,1202 @@
+/*
+ * CAN bus driver for Bosch M_CAN controller
+ *
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Dong Aisheng <b29396@freescale.com> + * + * Bosch M_CAN user manual can be obtained from: + * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/ + * mcan_users_manual_v302.pdf + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#include <linux/can/dev.h> + +/* napi related */ +#define M_CAN_NAPI_WEIGHT 64 + +/* message ram configuration data length */ +#define MRAM_CFG_LEN 8 + +/* registers definition */ +enum m_can_reg { + M_CAN_CREL = 0x0, + M_CAN_ENDN = 0x4, + M_CAN_CUST = 0x8, + M_CAN_FBTP = 0xc, + M_CAN_TEST = 0x10, + M_CAN_RWD = 0x14, + M_CAN_CCCR = 0x18, + M_CAN_BTP = 0x1c, + M_CAN_TSCC = 0x20, + M_CAN_TSCV = 0x24, + M_CAN_TOCC = 0x28, + M_CAN_TOCV = 0x2c, + M_CAN_ECR = 0x40, + M_CAN_PSR = 0x44, + M_CAN_IR = 0x50, + M_CAN_IE = 0x54, + M_CAN_ILS = 0x58, + M_CAN_ILE = 0x5c, + M_CAN_GFC = 0x80, + M_CAN_SIDFC = 0x84, + M_CAN_XIDFC = 0x88, + M_CAN_XIDAM = 0x90, + M_CAN_HPMS = 0x94, + M_CAN_NDAT1 = 0x98, + M_CAN_NDAT2 = 0x9c, + M_CAN_RXF0C = 0xa0, + M_CAN_RXF0S = 0xa4, + M_CAN_RXF0A = 0xa8, + M_CAN_RXBC = 0xac, + M_CAN_RXF1C = 0xb0, + M_CAN_RXF1S = 0xb4, + M_CAN_RXF1A = 0xb8, + M_CAN_RXESC = 0xbc, + M_CAN_TXBC = 0xc0, + M_CAN_TXFQS = 0xc4, + M_CAN_TXESC = 0xc8, + M_CAN_TXBRP = 0xcc, + M_CAN_TXBAR = 0xd0, + M_CAN_TXBCR = 0xd4, + M_CAN_TXBTO = 0xd8, + M_CAN_TXBCF = 0xdc, + M_CAN_TXBTIE = 0xe0, + M_CAN_TXBCIE = 0xe4, + M_CAN_TXEFC = 0xf0, + M_CAN_TXEFS = 0xf4, + M_CAN_TXEFA = 0xf8, +}; + +/* m_can lec values */ +enum m_can_lec_type { + LEC_NO_ERROR = 0, + LEC_STUFF_ERROR, + LEC_FORM_ERROR, + LEC_ACK_ERROR, + LEC_BIT1_ERROR, + LEC_BIT0_ERROR, + LEC_CRC_ERROR, + LEC_UNUSED, +}; + +enum m_can_mram_cfg { + MRAM_SIDF = 0, + MRAM_XIDF, + MRAM_RXF0, + MRAM_RXF1, + MRAM_RXB, + MRAM_TXE, + MRAM_TXB, + MRAM_CFG_NUM, +}; + +/* Test Register (TEST) */ +#define TEST_LBCK BIT(4) + +/* CC Control Register(CCCR) */ +#define CCCR_TEST BIT(7) +#define CCCR_MON BIT(5) +#define CCCR_CCE BIT(1) +#define CCCR_INIT BIT(0) + +/* Bit Timing & Prescaler Register (BTP) */ +#define BTR_BRP_MASK 0x3ff +#define BTR_BRP_SHIFT 16 +#define BTR_TSEG1_SHIFT 8 +#define BTR_TSEG1_MASK (0x3f << BTR_TSEG1_SHIFT) +#define BTR_TSEG2_SHIFT 4 +#define BTR_TSEG2_MASK (0xf << BTR_TSEG2_SHIFT) +#define BTR_SJW_SHIFT 0 +#define BTR_SJW_MASK 0xf + +/* Error Counter Register(ECR) */ +#define ECR_RP BIT(15) +#define ECR_REC_SHIFT 8 +#define ECR_REC_MASK (0x7f << ECR_REC_SHIFT) +#define ECR_TEC_SHIFT 0 +#define ECR_TEC_MASK 0xff + +/* Protocol Status Register(PSR) */ +#define PSR_BO BIT(7) +#define PSR_EW BIT(6) +#define PSR_EP BIT(5) +#define PSR_LEC_MASK 0x7 + +/* Interrupt Register(IR) */ +#define IR_ALL_INT 0xffffffff +#define IR_STE BIT(31) +#define IR_FOE BIT(30) +#define IR_ACKE BIT(29) +#define IR_BE BIT(28) +#define IR_CRCE BIT(27) +#define IR_WDI BIT(26) +#define IR_BO BIT(25) +#define IR_EW BIT(24) +#define IR_EP BIT(23) +#define IR_ELO BIT(22) +#define IR_BEU BIT(21) +#define IR_BEC BIT(20) +#define IR_DRX BIT(19) +#define IR_TOO BIT(18) +#define IR_MRAF BIT(17) +#define IR_TSW BIT(16) +#define IR_TEFL BIT(15) +#define IR_TEFF BIT(14) +#define IR_TEFW BIT(13) +#define IR_TEFN BIT(12) 
+#define IR_TFE		BIT(11)
+#define IR_TCF		BIT(10)
+#define IR_TC		BIT(9)
+#define IR_HPM		BIT(8)
+#define IR_RF1L		BIT(7)
+#define IR_RF1F		BIT(6)
+#define IR_RF1W		BIT(5)
+#define IR_RF1N		BIT(4)
+#define IR_RF0L		BIT(3)
+#define IR_RF0F		BIT(2)
+#define IR_RF0W		BIT(1)
+#define IR_RF0N		BIT(0)
+#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)
+#define IR_ERR_LEC	(IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
+#define IR_ERR_BUS	(IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \
+			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
+			 IR_RF1L | IR_RF0L)
+#define IR_ERR_ALL	(IR_ERR_STATE | IR_ERR_BUS)
+
+/* Interrupt Line Select (ILS) */
+#define ILS_ALL_INT0	0x0
+#define ILS_ALL_INT1	0xFFFFFFFF
+
+/* Interrupt Line Enable (ILE) */
+#define ILE_EINT0	BIT(0)
+#define ILE_EINT1	BIT(1)
+
+/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
+#define RXFC_FWM_OFF	24
+#define RXFC_FWM_MASK	0x7f
+#define RXFC_FWM_1	(1 << RXFC_FWM_OFF)
+#define RXFC_FS_OFF	16
+#define RXFC_FS_MASK	0x7f
+
+/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
+#define RXFS_RFL	BIT(25)
+#define RXFS_FF		BIT(24)
+#define RXFS_FPI_OFF	16
+#define RXFS_FPI_MASK	0x3f0000
+#define RXFS_FGI_OFF	8
+#define RXFS_FGI_MASK	0x3f00
+#define RXFS_FFL_MASK	0x7f
+
+/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
+#define M_CAN_RXESC_8BYTES	0x0
+
+/* Tx Buffer Configuration(TXBC) */
+#define TXBC_NDTB_OFF	16
+#define TXBC_NDTB_MASK	0x3f
+
+/* Tx Buffer Element Size Configuration(TXESC) */
+#define TXESC_TBDS_8BYTES	0x0
+
+/* Tx Event FIFO Configuration (TXEFC) */
+#define TXEFC_EFS_OFF	16
+#define TXEFC_EFS_MASK	0x3f
+
+/* Message RAM Configuration (in bytes) */
+#define SIDF_ELEMENT_SIZE	4
+#define XIDF_ELEMENT_SIZE	8
+#define RXF0_ELEMENT_SIZE	16
+#define RXF1_ELEMENT_SIZE	16
+#define RXB_ELEMENT_SIZE	16
+#define TXE_ELEMENT_SIZE	8
+#define TXB_ELEMENT_SIZE	16
+
+/* Message RAM Elements */
+#define M_CAN_FIFO_ID		0x0
+#define M_CAN_FIFO_DLC		0x4
+#define M_CAN_FIFO_DATA(n)	(0x8 + ((n) << 2))
+
+/* Rx Buffer Element */
+#define RX_BUF_ESI	BIT(31)
+#define RX_BUF_XTD	BIT(30)
+#define RX_BUF_RTR	BIT(29)
+
+/* Tx Buffer Element */
+#define TX_BUF_XTD	BIT(30)
+#define TX_BUF_RTR	BIT(29)
+
+/* address offset and element number for each FIFO/Buffer in the Message RAM */
+struct mram_cfg {
+	u16 off;
+	u8  num;
+};
+
+/* m_can private data structure */
+struct m_can_priv {
+	struct can_priv can;	/* must be the first member */
+	struct napi_struct napi;
+	struct net_device *dev;
+	struct device *device;
+	struct clk *hclk;
+	struct clk *cclk;
+	void __iomem *base;
+	u32 irqstatus;
+
+	/* message ram configuration */
+	void __iomem *mram_base;
+	struct mram_cfg mcfg[MRAM_CFG_NUM];
+};
+
+static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
+{
+	return readl(priv->base + reg);
+}
+
+static inline void m_can_write(const struct m_can_priv *priv,
+			       enum m_can_reg reg, u32 val)
+{
+	writel(val, priv->base + reg);
+}
+
+static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
+				  u32 fgi, unsigned int offset)
+{
+	return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
+		     fgi * RXF0_ELEMENT_SIZE + offset);
+}
+
+static inline void m_can_fifo_write(const struct m_can_priv *priv,
+				    u32 fpi, unsigned int offset, u32 val)
+{
+	return writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
+		      fpi * TXB_ELEMENT_SIZE + offset);
+}
+
+static inline void m_can_config_endisable(const struct m_can_priv *priv,
+					  bool enable)
+{
+	u32 cccr = m_can_read(priv, M_CAN_CCCR);
+	u32 timeout = 10;
+	u32 val = 0;
+
+	if (enable) {
+		/* enable m_can configuration */
+
m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); + /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ + m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); + } else { + m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); + } + + /* there's a delay for module initialization */ + if (enable) + val = CCCR_INIT | CCCR_CCE; + + while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { + if (timeout == 0) { + netdev_warn(priv->dev, "Failed to init module\n"); + return; + } + timeout--; + udelay(1); + } +} + +static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv) +{ + m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1); +} + +static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) +{ + m_can_write(priv, M_CAN_ILE, 0x0); +} + +static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf, + u32 rxfs) +{ + struct m_can_priv *priv = netdev_priv(dev); + u32 id, fgi; + + /* calculate the fifo get index for where to read data */ + fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; + id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); + if (id & RX_BUF_XTD) + cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = (id >> 18) & CAN_SFF_MASK; + + if (id & RX_BUF_RTR) { + cf->can_id |= CAN_RTR_FLAG; + } else { + id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); + cf->can_dlc = get_can_dlc((id >> 16) & 0x0F); + *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi, + M_CAN_FIFO_DATA(0)); + *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi, + M_CAN_FIFO_DATA(1)); + } + + /* acknowledge rx fifo 0 */ + m_can_write(priv, M_CAN_RXF0A, fgi); +} + +static int m_can_do_rx_poll(struct net_device *dev, int quota) +{ + struct m_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + struct can_frame *frame; + u32 pkts = 0; + u32 rxfs; + + rxfs = m_can_read(priv, M_CAN_RXF0S); + if (!(rxfs & RXFS_FFL_MASK)) { + netdev_dbg(dev, "no messages in fifo0\n"); + return 0; + } + + while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { + if (rxfs & RXFS_RFL) + netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); + + skb = alloc_can_skb(dev, &frame); + if (!skb) { + stats->rx_dropped++; + return pkts; + } + + m_can_read_fifo(dev, frame, rxfs); + + stats->rx_packets++; + stats->rx_bytes += frame->can_dlc; + + netif_receive_skb(skb); + + quota--; + pkts++; + rxfs = m_can_read(priv, M_CAN_RXF0S); + } + + if (pkts) + can_led_event(dev, CAN_LED_EVENT_RX); + + return pkts; +} + +static int m_can_handle_lost_msg(struct net_device *dev) +{ + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + struct can_frame *frame; + + netdev_err(dev, "msg lost in rxf0\n"); + + stats->rx_errors++; + stats->rx_over_errors++; + + skb = alloc_can_err_skb(dev, &frame); + if (unlikely(!skb)) + return 0; + + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + netif_receive_skb(skb); + + return 1; +} + +static int m_can_handle_lec_err(struct net_device *dev, + enum m_can_lec_type lec_type) +{ + struct m_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + priv->can.can_stats.bus_error++; + stats->rx_errors++; + + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + /* check for 'last error code' which tells us the + * type of the last error to occur on the CAN bus + */ + cf->can_id |= CAN_ERR_PROT | 
CAN_ERR_BUSERROR;
+	cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+
+	switch (lec_type) {
+	case LEC_STUFF_ERROR:
+		netdev_dbg(dev, "stuff error\n");
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+		break;
+	case LEC_FORM_ERROR:
+		netdev_dbg(dev, "form error\n");
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+		break;
+	case LEC_ACK_ERROR:
+		netdev_dbg(dev, "ack error\n");
+		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
+				CAN_ERR_PROT_LOC_ACK_DEL);
+		break;
+	case LEC_BIT1_ERROR:
+		netdev_dbg(dev, "bit1 error\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT1;
+		break;
+	case LEC_BIT0_ERROR:
+		netdev_dbg(dev, "bit0 error\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT0;
+		break;
+	case LEC_CRC_ERROR:
+		netdev_dbg(dev, "CRC error\n");
+		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+				CAN_ERR_PROT_LOC_CRC_DEL);
+		break;
+	default:
+		break;
+	}
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
+
+	return 1;
+}
+
+static int m_can_get_berr_counter(const struct net_device *dev,
+				  struct can_berr_counter *bec)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	unsigned int ecr;
+	int err;
+
+	err = clk_prepare_enable(priv->hclk);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->cclk);
+	if (err) {
+		clk_disable_unprepare(priv->hclk);
+		return err;
+	}
+
+	ecr = m_can_read(priv, M_CAN_ECR);
+	bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
+	bec->txerr = ecr & ECR_TEC_MASK;
+
+	clk_disable_unprepare(priv->cclk);
+	clk_disable_unprepare(priv->hclk);
+
+	return 0;
+}
+
+static int m_can_handle_state_change(struct net_device *dev,
+				     enum can_state new_state)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct can_berr_counter bec;
+	unsigned int ecr;
+
+	switch (new_state) {
+	case CAN_STATE_ERROR_WARNING:
+		/* error warning state */
+		priv->can.can_stats.error_warning++;
+		priv->can.state = CAN_STATE_ERROR_WARNING;
+		break;
+	case CAN_STATE_ERROR_PASSIVE:
+		/* error passive state */
+		priv->can.can_stats.error_passive++;
+		priv->can.state = CAN_STATE_ERROR_PASSIVE;
+		break;
+	case CAN_STATE_BUS_OFF:
+		/* bus-off state */
+		priv->can.state = CAN_STATE_BUS_OFF;
+		m_can_disable_all_interrupts(priv);
+		can_bus_off(dev);
+		break;
+	default:
+		break;
+	}
+
+	/* propagate the error condition to the CAN stack */
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	m_can_get_berr_counter(dev, &bec);
+
+	switch (new_state) {
+	case CAN_STATE_ERROR_WARNING:
+		/* error warning state */
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = (bec.txerr > bec.rxerr) ?
+			CAN_ERR_CRTL_TX_WARNING :
+			CAN_ERR_CRTL_RX_WARNING;
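+		/* both counters are reported in data[6]/data[7] below;
+		 * the warning state itself is entered once either error
+		 * counter reaches 96 (the CAN error warning limit)
+		 */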
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+		break;
+	case CAN_STATE_ERROR_PASSIVE:
+		/* error passive state */
+		cf->can_id |= CAN_ERR_CRTL;
+		ecr = m_can_read(priv, M_CAN_ECR);
+		if (ecr & ECR_RP)
+			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+		if (bec.txerr > 127)
+			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+		break;
+	case CAN_STATE_BUS_OFF:
+		/* bus-off state */
+		cf->can_id |= CAN_ERR_BUSOFF;
+		break;
+	default:
+		break;
+	}
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+	netif_receive_skb(skb);
+
+	return 1;
+}
+
+static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	int work_done = 0;
+
+	if ((psr & PSR_EW) &&
+	    (priv->can.state != CAN_STATE_ERROR_WARNING)) {
+		netdev_dbg(dev, "entered error warning state\n");
+		work_done += m_can_handle_state_change(dev,
+						       CAN_STATE_ERROR_WARNING);
+	}
+
+	if ((psr & PSR_EP) &&
+	    (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
+		netdev_dbg(dev, "entered error passive state\n");
+		work_done += m_can_handle_state_change(dev,
+						       CAN_STATE_ERROR_PASSIVE);
+	}
+
+	if ((psr & PSR_BO) &&
+	    (priv->can.state != CAN_STATE_BUS_OFF)) {
+		netdev_dbg(dev, "entered bus-off state\n");
+		work_done += m_can_handle_state_change(dev,
+						       CAN_STATE_BUS_OFF);
+	}
+
+	return work_done;
+}
+
+static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
+{
+	if (irqstatus & IR_WDI)
+		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
+	if (irqstatus & IR_ELO)
+		netdev_err(dev, "Error Logging Overflow\n");
+	if (irqstatus & IR_BEU)
+		netdev_err(dev, "Bit Error Uncorrected\n");
+	if (irqstatus & IR_BEC)
+		netdev_err(dev, "Bit Error Corrected\n");
+	if (irqstatus & IR_TOO)
+		netdev_err(dev, "Timeout reached\n");
+	if (irqstatus & IR_MRAF)
+		netdev_err(dev, "Message RAM access failure occurred\n");
+}
+
+static inline bool is_lec_err(u32 psr)
+{
+	psr &= LEC_UNUSED;
+
+	return psr && (psr != LEC_UNUSED);
+}
+
+static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
+				   u32 psr)
+{
+	struct m_can_priv *priv = netdev_priv(dev);
+	int work_done = 0;
+
+	if (irqstatus & IR_RF0L)
+		work_done += m_can_handle_lost_msg(dev);
+
+	/* handle lec errors on the bus */
+	if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+	    is_lec_err(psr))
+		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
+
+	/* other unprocessed error interrupts */
+	m_can_handle_other_err(dev, irqstatus);
+
+	return work_done;
+}
+
+static int m_can_poll(struct napi_struct *napi, int quota)
+{
+	struct net_device *dev = napi->dev;
+	struct m_can_priv *priv = netdev_priv(dev);
+	int work_done = 0;
+	u32 irqstatus, psr;
+
+	irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);
+	if (!irqstatus)
+		goto end;
+
+	psr = m_can_read(priv, M_CAN_PSR);
+	if (irqstatus & IR_ERR_STATE)
+		work_done += m_can_handle_state_errors(dev, psr);
+
+	if (irqstatus & IR_ERR_BUS)
+		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);
+
+	if (irqstatus & IR_RF0N)
+		work_done += m_can_do_rx_poll(dev, (quota - work_done));
+
+	if (work_done < quota) {
+		napi_complete(napi);
+		m_can_enable_all_interrupts(priv);
+	}
+
+end:
+	return work_done;
+}
+
+static irqreturn_t m_can_isr(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct m_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	u32 ir;
+
+	ir =
m_can_read(priv, M_CAN_IR); + if (!ir) + return IRQ_NONE; + + /* ACK all irqs */ + if (ir & IR_ALL_INT) + m_can_write(priv, M_CAN_IR, ir); + + /* schedule NAPI in case of + * - rx IRQ + * - state change IRQ + * - bus error IRQ and bus error reporting + */ + if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) { + priv->irqstatus = ir; + m_can_disable_all_interrupts(priv); + napi_schedule(&priv->napi); + } + + /* transmission complete interrupt */ + if (ir & IR_TC) { + stats->tx_bytes += can_get_echo_skb(dev, 0); + stats->tx_packets++; + can_led_event(dev, CAN_LED_EVENT_TX); + netif_wake_queue(dev); + } + + return IRQ_HANDLED; +} + +static const struct can_bittiming_const m_can_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 64, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +static int m_can_set_bittiming(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + const struct can_bittiming *bt = &priv->can.bittiming; + u16 brp, sjw, tseg1, tseg2; + u32 reg_btp; + + brp = bt->brp - 1; + sjw = bt->sjw - 1; + tseg1 = bt->prop_seg + bt->phase_seg1 - 1; + tseg2 = bt->phase_seg2 - 1; + reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | + (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); + m_can_write(priv, M_CAN_BTP, reg_btp); + netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp); + + return 0; +} + +/* Configure M_CAN chip: + * - set rx buffer/fifo element size + * - configure rx fifo + * - accept non-matching frame into fifo 0 + * - configure tx buffer + * - configure mode + * - setup bittiming + */ +static void m_can_chip_config(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + u32 cccr, test; + + m_can_config_endisable(priv, true); + + /* RX Buffer/FIFO Element Size 8 bytes data field */ + m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES); + + /* Accept Non-matching Frames Into FIFO 0 */ + m_can_write(priv, M_CAN_GFC, 0x0); + + /* only support one Tx Buffer currently */ + m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | + priv->mcfg[MRAM_TXB].off); + + /* only support 8 bytes firstly */ + m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES); + + m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | + priv->mcfg[MRAM_TXE].off); + + /* rx fifo configuration, blocking mode, fifo size 1 */ + m_can_write(priv, M_CAN_RXF0C, + (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) | + RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off); + + m_can_write(priv, M_CAN_RXF1C, + (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) | + RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); + + cccr = m_can_read(priv, M_CAN_CCCR); + cccr &= ~(CCCR_TEST | CCCR_MON); + test = m_can_read(priv, M_CAN_TEST); + test &= ~TEST_LBCK; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + cccr |= CCCR_MON; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + cccr |= CCCR_TEST; + test |= TEST_LBCK; + } + + m_can_write(priv, M_CAN_CCCR, cccr); + m_can_write(priv, M_CAN_TEST, test); + + /* enable interrupts */ + m_can_write(priv, M_CAN_IR, IR_ALL_INT); + if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) + m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC); + else + m_can_write(priv, M_CAN_IE, IR_ALL_INT); + + /* route all interrupts to INT0 */ + m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0); + + /* set bittiming params */ + m_can_set_bittiming(dev); + + m_can_config_endisable(priv, false); +} + +static void m_can_start(struct net_device *dev) 
+{ + struct m_can_priv *priv = netdev_priv(dev); + + /* basic m_can configuration */ + m_can_chip_config(dev); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + m_can_enable_all_interrupts(priv); +} + +static int m_can_set_mode(struct net_device *dev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + m_can_start(dev); + netif_wake_queue(dev); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void free_m_can_dev(struct net_device *dev) +{ + free_candev(dev); +} + +static struct net_device *alloc_m_can_dev(void) +{ + struct net_device *dev; + struct m_can_priv *priv; + + dev = alloc_candev(sizeof(*priv), 1); + if (!dev) + return NULL; + + priv = netdev_priv(dev); + netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); + + priv->dev = dev; + priv->can.bittiming_const = &m_can_bittiming_const; + priv->can.do_set_mode = m_can_set_mode; + priv->can.do_get_berr_counter = m_can_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING; + + return dev; +} + +static int m_can_open(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + int err; + + err = clk_prepare_enable(priv->hclk); + if (err) + return err; + + err = clk_prepare_enable(priv->cclk); + if (err) + goto exit_disable_hclk; + + /* open the can device */ + err = open_candev(dev); + if (err) { + netdev_err(dev, "failed to open can device\n"); + goto exit_disable_cclk; + } + + /* register interrupt handler */ + err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, + dev); + if (err < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto exit_irq_fail; + } + + /* start the m_can controller */ + m_can_start(dev); + + can_led_event(dev, CAN_LED_EVENT_OPEN); + napi_enable(&priv->napi); + netif_start_queue(dev); + + return 0; + +exit_irq_fail: + close_candev(dev); +exit_disable_cclk: + clk_disable_unprepare(priv->cclk); +exit_disable_hclk: + clk_disable_unprepare(priv->hclk); + return err; +} + +static void m_can_stop(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + + /* disable all interrupts */ + m_can_disable_all_interrupts(priv); + + clk_disable_unprepare(priv->hclk); + clk_disable_unprepare(priv->cclk); + + /* set the state as STOPPED */ + priv->can.state = CAN_STATE_STOPPED; +} + +static int m_can_close(struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + m_can_stop(dev); + free_irq(dev->irq, dev); + close_candev(dev); + can_led_event(dev, CAN_LED_EVENT_STOP); + + return 0; +} + +static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct m_can_priv *priv = netdev_priv(dev); + struct can_frame *cf = (struct can_frame *)skb->data; + u32 id; + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + netif_stop_queue(dev); + + if (cf->can_id & CAN_EFF_FLAG) { + id = cf->can_id & CAN_EFF_MASK; + id |= TX_BUF_XTD; + } else { + id = ((cf->can_id & CAN_SFF_MASK) << 18); + } + + if (cf->can_id & CAN_RTR_FLAG) + id |= TX_BUF_RTR; + + /* message ram configuration */ + m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0)); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4)); + can_put_echo_skb(skb, dev, 0); + + /* enable first TX buffer to start transfer */ + m_can_write(priv, M_CAN_TXBTIE, 0x1); 
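+	/* TXBAR is the Tx Buffer Add Request register: setting bit 0
+	 * requests transmission of the Tx buffer filled in above
+	 */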
+	m_can_write(priv, M_CAN_TXBAR, 0x1);
+
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops m_can_netdev_ops = {
+	.ndo_open = m_can_open,
+	.ndo_stop = m_can_close,
+	.ndo_start_xmit = m_can_start_xmit,
+};
+
+static int register_m_can_dev(struct net_device *dev)
+{
+	dev->flags |= IFF_ECHO;	/* we support local echo */
+	dev->netdev_ops = &m_can_netdev_ops;
+
+	return register_candev(dev);
+}
+
+static int m_can_of_parse_mram(struct platform_device *pdev,
+			       struct m_can_priv *priv)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+	void __iomem *addr;
+	u32 out_val[MRAM_CFG_LEN];
+	int ret;
+
+	/* message ram could be shared */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
+	if (!res)
+		return -ENODEV;
+
+	addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!addr)
+		return -ENOMEM;
+
+	/* get message ram configuration */
+	ret = of_property_read_u32_array(np, "bosch,mram-cfg",
+					 out_val, sizeof(out_val) / 4);
+	if (ret) {
+		dev_err(&pdev->dev, "cannot get message ram configuration\n");
+		return -ENODEV;
+	}
+
+	priv->mram_base = addr;
+	priv->mcfg[MRAM_SIDF].off = out_val[0];
+	priv->mcfg[MRAM_SIDF].num = out_val[1];
+	priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
+			priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
+	priv->mcfg[MRAM_XIDF].num = out_val[2];
+	priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
+			priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
+	priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK;
+	priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
+			priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
+	priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK;
+	priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
+			priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
+	priv->mcfg[MRAM_RXB].num = out_val[5];
+	priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
+			priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
+	priv->mcfg[MRAM_TXE].num = out_val[6];
+	priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
+			priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
+	priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK;
+
+	dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
+		priv->mram_base,
+		priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
+		priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
+		priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
+		priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
+		priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
+		priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
+		priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
+
+	return 0;
+}
+
+static int m_can_plat_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct m_can_priv *priv;
+	struct resource *res;
+	void __iomem *addr;
+	struct clk *hclk, *cclk;
+	int irq, ret;
+
+	hclk = devm_clk_get(&pdev->dev, "hclk");
+	cclk = devm_clk_get(&pdev->dev, "cclk");
+	if (IS_ERR(hclk) || IS_ERR(cclk)) {
+		dev_err(&pdev->dev, "no clock found\n");
+		return -ENODEV;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
+	addr = devm_ioremap_resource(&pdev->dev, res);
+	irq = platform_get_irq_byname(pdev, "int0");
+	if (IS_ERR(addr) || irq < 0)
+		return -EINVAL;
+
+	/* allocate the m_can device */
+	dev = alloc_m_can_dev();
+	if (!dev)
+		return -ENOMEM;
+
+	priv = netdev_priv(dev);
+	dev->irq = irq;
+	priv->base = addr;
+	priv->device = &pdev->dev;
+	priv->hclk = hclk;
+	priv->cclk = cclk;
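+	/* cclk is the CAN core clock feeding the bit timing logic, so
+	 * it is the rate the bittiming calculation must be based on
+	 */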
+	priv->can.clock.freq = clk_get_rate(cclk);
+
+	ret = m_can_of_parse_mram(pdev, priv);
+	if (ret)
+		goto failed_free_dev;
+
+	platform_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	ret = register_m_can_dev(dev);
+	if (ret) {
+		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+			KBUILD_MODNAME, ret);
+		goto failed_free_dev;
+	}
+
+	devm_can_led_init(dev);
+
+	dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+		 KBUILD_MODNAME, priv->base, dev->irq);
+
+	return 0;
+
+failed_free_dev:
+	free_m_can_dev(dev);
+	return ret;
+}
+
+static __maybe_unused int m_can_suspend(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct m_can_priv *priv = netdev_priv(ndev);
+
+	if (netif_running(ndev)) {
+		netif_stop_queue(ndev);
+		netif_device_detach(ndev);
+	}
+
+	/* TODO: enter low power */
+
+	priv->can.state = CAN_STATE_SLEEPING;
+
+	return 0;
+}
+
+static __maybe_unused int m_can_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct m_can_priv *priv = netdev_priv(ndev);
+
+	/* TODO: exit low power */
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	if (netif_running(ndev)) {
+		netif_device_attach(ndev);
+		netif_start_queue(ndev);
+	}
+
+	return 0;
+}
+
+static void unregister_m_can_dev(struct net_device *dev)
+{
+	unregister_candev(dev);
+}
+
+static int m_can_plat_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+
+	unregister_m_can_dev(dev);
+	platform_set_drvdata(pdev, NULL);
+
+	free_m_can_dev(dev);
+
+	return 0;
+}
+
+static const struct dev_pm_ops m_can_pmops = {
+	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
+};
+
+static const struct of_device_id m_can_of_table[] = {
+	{ .compatible = "bosch,m_can", .data = NULL },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, m_can_of_table);
+
+static struct platform_driver m_can_plat_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = m_can_of_table,
+		.pm = &m_can_pmops,
+	},
+	.probe = m_can_plat_probe,
+	.remove = m_can_plat_remove,
+};
+
+module_platform_driver(m_can_plat_driver);
+
+MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
index c9fab17cd8b4..58903b45f5fb 100644
--- a/drivers/net/can/mscan/Makefile
+++ b/drivers/net/can/mscan/Makefile
@@ -1,5 +1,3 @@
 obj-$(CONFIG_CAN_MPC5XXX)	+= mscan-mpc5xxx.o
 mscan-mpc5xxx-objs		:= mscan.o mpc5xxx_can.o
-
-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 5268d216ecfa..1abe133d1594 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -20,6 +20,7 @@
 #include <linux/can/dev.h>
 #include <linux/clk.h>
 #include <linux/can/platform/rcar_can.h>
+#include <linux/of.h>
 
 #define RCAR_CAN_DRV_NAME	"rcar_can"
 
@@ -87,6 +88,7 @@ struct rcar_can_priv {
 	struct napi_struct napi;
 	struct rcar_can_regs __iomem *regs;
 	struct clk *clk;
+	struct clk *can_clk;
 	u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
 	u32 tx_head;
 	u32 tx_tail;
@@ -505,14 +507,20 @@ static int rcar_can_open(struct net_device *ndev)
 
 	err = clk_prepare_enable(priv->clk);
 	if (err) {
-		netdev_err(ndev, "clk_prepare_enable() failed, error %d\n",
+		netdev_err(ndev, "failed to enable peripheral clock, error %d\n",
 			   err);
 		goto out;
 	}
+	err = clk_prepare_enable(priv->can_clk);
+	if (err) {
+		netdev_err(ndev, "failed to enable CAN clock, error %d\n",
+
err); + goto out_clock; + } err = open_candev(ndev); if (err) { netdev_err(ndev, "open_candev() failed, error %d\n", err); - goto out_clock; + goto out_can_clock; } napi_enable(&priv->napi); err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); @@ -527,6 +535,8 @@ static int rcar_can_open(struct net_device *ndev) out_close: napi_disable(&priv->napi); close_candev(ndev); +out_can_clock: + clk_disable_unprepare(priv->can_clk); out_clock: clk_disable_unprepare(priv->clk); out: @@ -565,6 +575,7 @@ static int rcar_can_close(struct net_device *ndev) rcar_can_stop(ndev); free_irq(ndev->irq, ndev); napi_disable(&priv->napi); + clk_disable_unprepare(priv->can_clk); clk_disable_unprepare(priv->clk); close_candev(ndev); can_led_event(ndev, CAN_LED_EVENT_STOP); @@ -715,6 +726,12 @@ static int rcar_can_get_berr_counter(const struct net_device *dev, return 0; } +static const char * const clock_names[] = { + [CLKR_CLKP1] = "clkp1", + [CLKR_CLKP2] = "clkp2", + [CLKR_CLKEXT] = "can_clk", +}; + static int rcar_can_probe(struct platform_device *pdev) { struct rcar_can_platform_data *pdata; @@ -722,13 +739,20 @@ static int rcar_can_probe(struct platform_device *pdev) struct net_device *ndev; struct resource *mem; void __iomem *addr; + u32 clock_select = CLKR_CLKP1; int err = -ENODEV; int irq; - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - dev_err(&pdev->dev, "No platform data provided!\n"); - goto fail; + if (pdev->dev.of_node) { + of_property_read_u32(pdev->dev.of_node, + "renesas,can-clock-select", &clock_select); + } else { + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "No platform data provided!\n"); + goto fail; + } + clock_select = pdata->clock_select; } irq = platform_get_irq(pdev, 0); @@ -753,10 +777,22 @@ static int rcar_can_probe(struct platform_device *pdev) priv = netdev_priv(ndev); - priv->clk = devm_clk_get(&pdev->dev, NULL); + priv->clk = devm_clk_get(&pdev->dev, "clkp1"); if (IS_ERR(priv->clk)) { err = PTR_ERR(priv->clk); - dev_err(&pdev->dev, "cannot get clock: %d\n", err); + dev_err(&pdev->dev, "cannot get peripheral clock: %d\n", err); + goto fail_clk; + } + + if (clock_select >= ARRAY_SIZE(clock_names)) { + err = -EINVAL; + dev_err(&pdev->dev, "invalid CAN clock selected\n"); + goto fail_clk; + } + priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]); + if (IS_ERR(priv->can_clk)) { + err = PTR_ERR(priv->can_clk); + dev_err(&pdev->dev, "cannot get CAN clock: %d\n", err); goto fail_clk; } @@ -765,8 +801,8 @@ static int rcar_can_probe(struct platform_device *pdev) ndev->flags |= IFF_ECHO; priv->ndev = ndev; priv->regs = addr; - priv->clock_select = pdata->clock_select; - priv->can.clock.freq = clk_get_rate(priv->clk); + priv->clock_select = clock_select; + priv->can.clock.freq = clk_get_rate(priv->can_clk); priv->can.bittiming_const = &rcar_can_bittiming_const; priv->can.do_set_mode = rcar_can_do_set_mode; priv->can.do_get_berr_counter = rcar_can_get_berr_counter; @@ -858,10 +894,20 @@ static int __maybe_unused rcar_can_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume); +static const struct of_device_id rcar_can_of_table[] __maybe_unused = { + { .compatible = "renesas,can-r8a7778" }, + { .compatible = "renesas,can-r8a7779" }, + { .compatible = "renesas,can-r8a7790" }, + { .compatible = "renesas,can-r8a7791" }, + { } +}; +MODULE_DEVICE_TABLE(of, rcar_can_of_table); + static struct platform_driver rcar_can_driver = { .driver = { .name = RCAR_CAN_DRV_NAME, .owner = 
THIS_MODULE, + .of_match_table = of_match_ptr(rcar_can_of_table), .pm = &rcar_can_pm_ops, }, .probe = rcar_can_probe, diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile index 531d5fcc97e5..be11ddd11b87 100644 --- a/drivers/net/can/sja1000/Makefile +++ b/drivers/net/can/sja1000/Makefile @@ -12,5 +12,3 @@ obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o obj-$(CONFIG_CAN_TSCAN1) += tscan1.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile index c5e5016c742e..a23da492dad5 100644 --- a/drivers/net/can/softing/Makefile +++ b/drivers/net/can/softing/Makefile @@ -2,5 +2,3 @@ softing-y := softing_main.o softing_fw.o obj-$(CONFIG_CAN_SOFTING) += softing.o obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile index 90bcacffbc65..0e86040cdd8c 100644 --- a/drivers/net/can/spi/Makefile +++ b/drivers/net/can/spi/Makefile @@ -4,5 +4,3 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 5df239e68812..c66d699640a9 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1107,10 +1107,10 @@ static int mcp251x_can_probe(struct spi_device *spi) * Minimum coherent DMA allocation is PAGE_SIZE, so allocate * that much and share it between Tx and Rx DMA buffers. */ - priv->spi_tx_buf = dma_alloc_coherent(&spi->dev, - PAGE_SIZE, - &priv->spi_tx_dma, - GFP_DMA); + priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev, + PAGE_SIZE, + &priv->spi_tx_dma, + GFP_DMA); if (priv->spi_tx_buf) { priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2)); @@ -1156,9 +1156,6 @@ static int mcp251x_can_probe(struct spi_device *spi) return 0; error_probe: - if (mcp251x_enable_dma) - dma_free_coherent(&spi->dev, PAGE_SIZE, - priv->spi_tx_buf, priv->spi_tx_dma); mcp251x_power_enable(priv->power, 0); out_clk: @@ -1178,11 +1175,6 @@ static int mcp251x_can_remove(struct spi_device *spi) unregister_candev(net); - if (mcp251x_enable_dma) { - dma_free_coherent(&spi->dev, PAGE_SIZE, - priv->spi_tx_buf, priv->spi_tx_dma); - } - mcp251x_power_enable(priv->power, 0); if (!IS_ERR(priv->clk)) diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile index 7b9a393b1ac8..a64cf983fb87 100644 --- a/drivers/net/can/usb/Makefile +++ b/drivers/net/can/usb/Makefile @@ -8,5 +8,3 @@ obj-$(CONFIG_CAN_GS_USB) += gs_usb.o obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o - -ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index b8fe808b7957..ea0697eaeff5 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -36,4 +36,24 @@ config NET_DSA_MV88E6123_61_65 This enables support for the Marvell 88E6123/6161/6165 ethernet switch chips. +config NET_DSA_MV88E6171 + tristate "Marvell 88E6171 ethernet switch chip support" + select NET_DSA + select NET_DSA_MV88E6XXX + select NET_DSA_TAG_EDSA + ---help--- + This enables support for the Marvell 88E6171 ethernet switch + chip. 
+ +config NET_DSA_BCM_SF2 + tristate "Broadcom Starfighter 2 Ethernet switch support" + select NET_DSA + select NET_DSA_TAG_BRCM + select FIXED_PHY if NET_DSA_BCM_SF2=y + select BCM7XXX_PHY + select MDIO_BCM_UNIMAC + ---help--- + This enables support for the Broadcom Starfighter 2 Ethernet + switch chips. + endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index f3bda05536cc..23a90de9830e 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -7,3 +7,7 @@ endif ifdef CONFIG_NET_DSA_MV88E6131 mv88e6xxx_drv-y += mv88e6131.o endif +ifdef CONFIG_NET_DSA_MV88E6171 +mv88e6xxx_drv-y += mv88e6171.o +endif +obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c new file mode 100644 index 000000000000..d9b7da545063 --- /dev/null +++ b/drivers/net/dsa/bcm_sf2.c @@ -0,0 +1,785 @@ +/* + * Broadcom Starfighter 2 DSA switch driver + * + * Copyright (C) 2014, Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/list.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <linux/mii.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <net/dsa.h> +#include <linux/ethtool.h> + +#include "bcm_sf2.h" +#include "bcm_sf2_regs.h" + +/* String, offset, and register size in bytes if different from 4 bytes */ +static const struct bcm_sf2_hw_stats bcm_sf2_mib[] = { + { "TxOctets", 0x000, 8 }, + { "TxDropPkts", 0x020 }, + { "TxQPKTQ0", 0x030 }, + { "TxBroadcastPkts", 0x040 }, + { "TxMulticastPkts", 0x050 }, + { "TxUnicastPKts", 0x060 }, + { "TxCollisions", 0x070 }, + { "TxSingleCollision", 0x080 }, + { "TxMultipleCollision", 0x090 }, + { "TxDeferredCollision", 0x0a0 }, + { "TxLateCollision", 0x0b0 }, + { "TxExcessiveCollision", 0x0c0 }, + { "TxFrameInDisc", 0x0d0 }, + { "TxPausePkts", 0x0e0 }, + { "TxQPKTQ1", 0x0f0 }, + { "TxQPKTQ2", 0x100 }, + { "TxQPKTQ3", 0x110 }, + { "TxQPKTQ4", 0x120 }, + { "TxQPKTQ5", 0x130 }, + { "RxOctets", 0x140, 8 }, + { "RxUndersizePkts", 0x160 }, + { "RxPausePkts", 0x170 }, + { "RxPkts64Octets", 0x180 }, + { "RxPkts65to127Octets", 0x190 }, + { "RxPkts128to255Octets", 0x1a0 }, + { "RxPkts256to511Octets", 0x1b0 }, + { "RxPkts512to1023Octets", 0x1c0 }, + { "RxPkts1024toMaxPktsOctets", 0x1d0 }, + { "RxOversizePkts", 0x1e0 }, + { "RxJabbers", 0x1f0 }, + { "RxAlignmentErrors", 0x200 }, + { "RxFCSErrors", 0x210 }, + { "RxGoodOctets", 0x220, 8 }, + { "RxDropPkts", 0x240 }, + { "RxUnicastPkts", 0x250 }, + { "RxMulticastPkts", 0x260 }, + { "RxBroadcastPkts", 0x270 }, + { "RxSAChanges", 0x280 }, + { "RxFragments", 0x290 }, + { "RxJumboPkt", 0x2a0 }, + { "RxSymblErr", 0x2b0 }, + { "InRangeErrCount", 0x2c0 }, + { "OutRangeErrCount", 0x2d0 }, + { "EEELpiEvent", 0x2e0 }, + { "EEELpiDuration", 0x2f0 }, + { "RxDiscard", 0x300, 8 }, + { "TxQPKTQ6", 0x320 }, + { "TxQPKTQ7", 0x330 }, + { "TxPkts64Octets", 0x340 }, + { "TxPkts65to127Octets", 0x350 }, + { "TxPkts128to255Octets", 0x360 }, + { "TxPkts256to511Ocets", 0x370 }, + { "TxPkts512to1023Ocets", 0x380 }, + { "TxPkts1024toMaxPktOcets", 0x390 }, +}; + +#define BCM_SF2_STATS_SIZE ARRAY_SIZE(bcm_sf2_mib) + +static void 
bcm_sf2_sw_get_strings(struct dsa_switch *ds, + int port, uint8_t *data) +{ + unsigned int i; + + for (i = 0; i < BCM_SF2_STATS_SIZE; i++) + memcpy(data + i * ETH_GSTRING_LEN, + bcm_sf2_mib[i].string, ETH_GSTRING_LEN); +} + +static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, + int port, uint64_t *data) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + const struct bcm_sf2_hw_stats *s; + unsigned int i; + u64 val = 0; + u32 offset; + + mutex_lock(&priv->stats_mutex); + + /* Now fetch the per-port counters */ + for (i = 0; i < BCM_SF2_STATS_SIZE; i++) { + s = &bcm_sf2_mib[i]; + + /* Do a latched 64-bit read if needed */ + offset = s->reg + CORE_P_MIB_OFFSET(port); + if (s->sizeof_stat == 8) + val = core_readq(priv, offset); + else + val = core_readl(priv, offset); + + data[i] = (u64)val; + } + + mutex_unlock(&priv->stats_mutex); +} + +static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds) +{ + return BCM_SF2_STATS_SIZE; +} + +static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr) +{ + return "Broadcom Starfighter 2"; +} + +static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int i; + u32 reg, val; + + /* Enable the port memories */ + reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); + reg &= ~P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + + /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */ + reg = core_readl(priv, CORE_IMP_CTL); + reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN); + reg &= ~(RX_DIS | TX_DIS); + core_writel(priv, reg, CORE_IMP_CTL); + + /* Enable forwarding */ + core_writel(priv, SW_FWDG_EN, CORE_SWMODE); + + /* Enable IMP port in dumb mode */ + reg = core_readl(priv, CORE_SWITCH_CTRL); + reg |= MII_DUMB_FWDG_EN; + core_writel(priv, reg, CORE_SWITCH_CTRL); + + /* Resolve which bit controls the Broadcom tag */ + switch (port) { + case 8: + val = BRCM_HDR_EN_P8; + break; + case 7: + val = BRCM_HDR_EN_P7; + break; + case 5: + val = BRCM_HDR_EN_P5; + break; + default: + val = 0; + break; + } + + /* Enable Broadcom tags for IMP port */ + reg = core_readl(priv, CORE_BRCM_HDR_CTRL); + reg |= val; + core_writel(priv, reg, CORE_BRCM_HDR_CTRL); + + /* Enable reception Broadcom tag for CPU TX (switch RX) to + * allow us to tag outgoing frames + */ + reg = core_readl(priv, CORE_BRCM_HDR_RX_DIS); + reg &= ~(1 << port); + core_writel(priv, reg, CORE_BRCM_HDR_RX_DIS); + + /* Enable transmission of Broadcom tags from the switch (CPU RX) to + * allow delivering frames to the per-port net_devices + */ + reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS); + reg &= ~(1 << port); + core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS); + + /* Force link status for IMP port */ + reg = core_readl(priv, CORE_STS_OVERRIDE_IMP); + reg |= (MII_SW_OR | LINK_STS); + core_writel(priv, reg, CORE_STS_OVERRIDE_IMP); + + /* Enable the IMP Port to be in the same VLAN as the other ports + * on a per-port basis such that we only have Port i and IMP in + * the same VLAN. 
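+	 * This is done by setting bit 'port' (the IMP port) in the
+	 * CORE_PORT_VLAN_CTL_PORT register of each enabled port below.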
+ */ + for (i = 0; i < priv->hw_params.num_ports; i++) { + if (!((1 << i) & ds->phys_port_mask)) + continue; + + reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); + reg |= (1 << port); + core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(i)); + } +} + +static void bcm_sf2_port_setup(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 reg; + + /* Clear the memory power down */ + reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); + reg &= ~P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + + /* Clear the Rx and Tx disable bits and set to no spanning tree */ + core_writel(priv, 0, CORE_G_PCTL_PORT(port)); + + /* Enable port 7 interrupts to get notified */ + if (port == 7) + intrl2_1_mask_clear(priv, P_IRQ_MASK(P7_IRQ_OFF)); + + /* Set this port, and only this one to be in the default VLAN */ + reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); + reg &= ~PORT_VLAN_CTRL_MASK; + reg |= (1 << port); + core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port)); +} + +static void bcm_sf2_port_disable(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 off, reg; + + if (priv->wol_ports_mask & (1 << port)) + return; + + if (dsa_is_cpu_port(ds, port)) + off = CORE_IMP_CTL; + else + off = CORE_G_PCTL_PORT(port); + + reg = core_readl(priv, off); + reg |= RX_DIS | TX_DIS; + core_writel(priv, reg, off); + + /* Power down the port memory */ + reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); + reg |= P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); +} + +static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) +{ + struct bcm_sf2_priv *priv = dev_id; + + priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & + ~priv->irq0_mask; + intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR); + + return IRQ_HANDLED; +} + +static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id) +{ + struct bcm_sf2_priv *priv = dev_id; + + priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & + ~priv->irq1_mask; + intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); + + if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) + priv->port_sts[7].link = 1; + if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) + priv->port_sts[7].link = 0; + + return IRQ_HANDLED; +} + +static int bcm_sf2_sw_setup(struct dsa_switch *ds) +{ + const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct device_node *dn; + void __iomem **base; + unsigned int port; + unsigned int i; + u32 reg, rev; + int ret; + + spin_lock_init(&priv->indir_lock); + mutex_init(&priv->stats_mutex); + + /* All the interesting properties are at the parent device_node + * level + */ + dn = ds->pd->of_node->parent; + + priv->irq0 = irq_of_parse_and_map(dn, 0); + priv->irq1 = irq_of_parse_and_map(dn, 1); + + base = &priv->core; + for (i = 0; i < BCM_SF2_REGS_NUM; i++) { + *base = of_iomap(dn, i); + if (*base == NULL) { + pr_err("unable to find register: %s\n", reg_names[i]); + return -ENODEV; + } + base++; + } + + /* Disable all interrupts and request them */ + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + + ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, + "switch_0", priv); + if (ret < 0) { + pr_err("failed to 
request switch_0 IRQ\n"); + goto out_unmap; + } + + ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, + "switch_1", priv); + if (ret < 0) { + pr_err("failed to request switch_1 IRQ\n"); + goto out_free_irq0; + } + + /* Reset the MIB counters */ + reg = core_readl(priv, CORE_GMNCFGCFG); + reg |= RST_MIB_CNT; + core_writel(priv, reg, CORE_GMNCFGCFG); + reg &= ~RST_MIB_CNT; + core_writel(priv, reg, CORE_GMNCFGCFG); + + /* Get the maximum number of ports for this switch */ + priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; + if (priv->hw_params.num_ports > DSA_MAX_PORTS) + priv->hw_params.num_ports = DSA_MAX_PORTS; + + /* Assume a single GPHY setup if we can't read that property */ + if (of_property_read_u32(dn, "brcm,num-gphy", + &priv->hw_params.num_gphy)) + priv->hw_params.num_gphy = 1; + + /* Enable all valid ports and disable those unused */ + for (port = 0; port < priv->hw_params.num_ports; port++) { + /* IMP port receives special treatment */ + if ((1 << port) & ds->phys_port_mask) + bcm_sf2_port_setup(ds, port); + else if (dsa_is_cpu_port(ds, port)) + bcm_sf2_imp_setup(ds, port); + else + bcm_sf2_port_disable(ds, port); + } + + /* Include the pseudo-PHY address and the broadcast PHY address to + * divert reads towards our workaround + */ + ds->phys_mii_mask |= ((1 << 30) | (1 << 0)); + + rev = reg_readl(priv, REG_SWITCH_REVISION); + priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & + SWITCH_TOP_REV_MASK; + priv->hw_params.core_rev = (rev & SF2_REV_MASK); + + rev = reg_readl(priv, REG_PHY_REVISION); + priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; + + pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", + priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, + priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, + priv->core, priv->irq0, priv->irq1); + + return 0; + +out_free_irq0: + free_irq(priv->irq0, priv); +out_unmap: + base = &priv->core; + for (i = 0; i < BCM_SF2_REGS_NUM; i++) { + iounmap(*base); + base++; + } + return ret; +} + +static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr) +{ + return 0; +} + +static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + + /* The BCM7xxx PHY driver expects to find the integrated PHY revision + * in bits 15:8 and the patch level in bits 7:0 which is exactly what + * the REG_PHY_REVISION register layout is. 
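+ * The raw register value can therefore be handed back without any + * shifting or masking.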
+ */ + + return priv->hw_params.gphy_rev; +} + +static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr, + int regnum, u16 val) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + int ret = 0; + u32 reg; + + reg = reg_readl(priv, REG_SWITCH_CNTRL); + reg |= MDIO_MASTER_SEL; + reg_writel(priv, reg, REG_SWITCH_CNTRL); + + /* Page << 8 | offset */ + reg = 0x70; + reg <<= 2; + core_writel(priv, addr, reg); + + /* Page << 8 | offset */ + reg = 0x80 << 8 | regnum << 1; + reg <<= 2; + + if (op) + ret = core_readl(priv, reg); + else + core_writel(priv, val, reg); + + reg = reg_readl(priv, REG_SWITCH_CNTRL); + reg &= ~MDIO_MASTER_SEL; + reg_writel(priv, reg, REG_SWITCH_CNTRL); + + return ret & 0xffff; +} + +static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum) +{ + /* Intercept reads from the MDIO broadcast address or Broadcom + * pseudo-PHY address + */ + switch (addr) { + case 0: + case 30: + return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0); + default: + return 0xffff; + } +} + +static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum, + u16 val) +{ + /* Intercept writes to the MDIO broadcast address or Broadcom + * pseudo-PHY address + */ + switch (addr) { + case 0: + case 30: + bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val); + break; + } + + return 0; +} + +static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 id_mode_dis = 0, port_mode; + const char *str = NULL; + u32 reg; + + switch (phydev->interface) { + case PHY_INTERFACE_MODE_RGMII: + str = "RGMII (no delay)"; + id_mode_dis = 1; + case PHY_INTERFACE_MODE_RGMII_TXID: + if (!str) + str = "RGMII (TX delay)"; + port_mode = EXT_GPHY; + break; + case PHY_INTERFACE_MODE_MII: + str = "MII"; + port_mode = EXT_EPHY; + break; + case PHY_INTERFACE_MODE_REVMII: + str = "Reverse MII"; + port_mode = EXT_REVMII; + break; + default: + goto force_link; + } + + /* Clear id_mode_dis bit, and the existing port mode, but + * make sure we enable the RGMII block for data to pass + */ + reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg &= ~ID_MODE_DIS; + reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); + reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); + + reg |= port_mode | RGMII_MODE_EN; + if (id_mode_dis) + reg |= ID_MODE_DIS; + + if (phydev->pause) { + if (phydev->asym_pause) + reg |= TX_PAUSE_EN; + reg |= RX_PAUSE_EN; + } + + reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + + pr_info("Port %d configured for %s\n", port, str); + +force_link: + /* Force link settings detected from the PHY */ + reg = SW_OVERRIDE; + switch (phydev->speed) { + case SPEED_1000: + reg |= SPDSTS_1000 << SPEED_SHIFT; + break; + case SPEED_100: + reg |= SPDSTS_100 << SPEED_SHIFT; + break; + } + + if (phydev->link) + reg |= LINK_STS; + if (phydev->duplex == DUPLEX_FULL) + reg |= DUPLX_MODE; + + core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); +} + +static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, + struct fixed_phy_status *status) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + u32 link, duplex, pause, speed; + u32 reg; + + link = core_readl(priv, CORE_LNKSTS); + duplex = core_readl(priv, CORE_DUPSTS); + pause = core_readl(priv, CORE_PAUSESTS); + speed = core_readl(priv, CORE_SPDSTS); + + speed >>= (port * SPDSTS_SHIFT); + speed &= SPDSTS_MASK; + + status->link = 0; + + /* Port 7 is special as we do not get link status from CORE_LNKSTS, + * which means that we need to force the link at the port override + * 
level to get the data to flow. We use what the interrupt + * handler determined earlier. + */ + if (port == 7) { + status->link = priv->port_sts[port].link; + reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(7)); + reg |= SW_OVERRIDE; + if (status->link) + reg |= LINK_STS; + else + reg &= ~LINK_STS; + core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(7)); + status->duplex = 1; + } else { + status->link = !!(link & (1 << port)); + status->duplex = !!(duplex & (1 << port)); + } + + switch (speed) { + case SPDSTS_10: + status->speed = SPEED_10; + break; + case SPDSTS_100: + status->speed = SPEED_100; + break; + case SPDSTS_1000: + status->speed = SPEED_1000; + break; + } + + if ((pause & (1 << port)) && + (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { + status->asym_pause = 1; + status->pause = 1; + } + + if (pause & (1 << port)) + status->pause = 1; +} + +static int bcm_sf2_sw_suspend(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int port; + + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); + intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); + intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); + + /* Disable all ports physically present, including the IMP + * port; the other ones have already been disabled during + * bcm_sf2_sw_setup + */ + for (port = 0; port < DSA_MAX_PORTS; port++) { + if ((1 << port) & ds->phys_port_mask || + dsa_is_cpu_port(ds, port)) + bcm_sf2_port_disable(ds, port); + } + + return 0; +} + +static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 1000; + u32 reg; + + reg = core_readl(priv, CORE_WATCHDOG_CTRL); + reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET; + core_writel(priv, reg, CORE_WATCHDOG_CTRL); + + do { + reg = core_readl(priv, CORE_WATCHDOG_CTRL); + if (!(reg & SOFTWARE_RESET)) + break; + + usleep_range(1000, 2000); + } while (--timeout); + + if (timeout == 0) + return -ETIMEDOUT; + + return 0; +} + +static int bcm_sf2_sw_resume(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int port; + u32 reg; + int ret; + + ret = bcm_sf2_sw_rst(priv); + if (ret) { + pr_err("%s: failed to software reset switch\n", __func__); + return ret; + } + + /* Reinitialize the single GPHY */ + if (priv->hw_params.num_gphy == 1) { + reg = reg_readl(priv, REG_SPHY_CNTRL); + reg |= PHY_RESET; + reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS); + reg_writel(priv, reg, REG_SPHY_CNTRL); + udelay(21); + reg = reg_readl(priv, REG_SPHY_CNTRL); + reg &= ~PHY_RESET; + reg_writel(priv, reg, REG_SPHY_CNTRL); + } + + for (port = 0; port < DSA_MAX_PORTS; port++) { + if ((1 << port) & ds->phys_port_mask) + bcm_sf2_port_setup(ds, port); + else if (dsa_is_cpu_port(ds, port)) + bcm_sf2_imp_setup(ds, port); + } + + return 0; +} + +static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *wol) +{ + struct net_device *p = ds->dst[ds->index].master_netdev; + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct ethtool_wolinfo pwol; + + /* Get the parent device WoL settings */ + p->ethtool_ops->get_wol(p, &pwol); + + /* Advertise the parent device supported settings */ + wol->supported = pwol.supported; + memset(&wol->sopass, 0, sizeof(wol->sopass)); + + if (pwol.wolopts & WAKE_MAGICSECURE) + memcpy(&wol->sopass, pwol.sopass, sizeof(wol->sopass)); + + if (priv->wol_ports_mask & (1 << port)) + 
wol->wolopts = pwol.wolopts; + else + wol->wolopts = 0; +} + +static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *wol) +{ + struct net_device *p = ds->dst[ds->index].master_netdev; + struct bcm_sf2_priv *priv = ds_to_priv(ds); + s8 cpu_port = ds->dst[ds->index].cpu_port; + struct ethtool_wolinfo pwol; + + p->ethtool_ops->get_wol(p, &pwol); + if (wol->wolopts & ~pwol.supported) + return -EINVAL; + + if (wol->wolopts) + priv->wol_ports_mask |= (1 << port); + else + priv->wol_ports_mask &= ~(1 << port); + + /* If we have at least one port enabled, make sure the CPU port + * is also enabled. If the CPU port is the last one enabled, we disable + * it since this configuration does not make sense. + */ + if (priv->wol_ports_mask && priv->wol_ports_mask != (1 << cpu_port)) + priv->wol_ports_mask |= (1 << cpu_port); + else + priv->wol_ports_mask &= ~(1 << cpu_port); + + return p->ethtool_ops->set_wol(p, wol); +} + +static struct dsa_switch_driver bcm_sf2_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_BRCM, + .priv_size = sizeof(struct bcm_sf2_priv), + .probe = bcm_sf2_sw_probe, + .setup = bcm_sf2_sw_setup, + .set_addr = bcm_sf2_sw_set_addr, + .get_phy_flags = bcm_sf2_sw_get_phy_flags, + .phy_read = bcm_sf2_sw_phy_read, + .phy_write = bcm_sf2_sw_phy_write, + .get_strings = bcm_sf2_sw_get_strings, + .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, + .get_sset_count = bcm_sf2_sw_get_sset_count, + .adjust_link = bcm_sf2_sw_adjust_link, + .fixed_link_update = bcm_sf2_sw_fixed_link_update, + .suspend = bcm_sf2_sw_suspend, + .resume = bcm_sf2_sw_resume, + .get_wol = bcm_sf2_sw_get_wol, + .set_wol = bcm_sf2_sw_set_wol, +}; + +static int __init bcm_sf2_init(void) +{ + register_switch_driver(&bcm_sf2_switch_driver); + + return 0; +} +module_init(bcm_sf2_init); + +static void __exit bcm_sf2_exit(void) +{ + unregister_switch_driver(&bcm_sf2_switch_driver); +} +module_exit(bcm_sf2_exit); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:brcm-sf2"); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h new file mode 100644 index 000000000000..8fd6c1451a84 --- /dev/null +++ b/drivers/net/dsa/bcm_sf2.h @@ -0,0 +1,144 @@ +/* + * Broadcom Starfighter2 private context + * + * Copyright (C) 2014, Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __BCM_SF2_H +#define __BCM_SF2_H + +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/mii.h> + +#include <net/dsa.h> + +#include "bcm_sf2_regs.h" + +struct bcm_sf2_hw_params { + u16 top_rev; + u16 core_rev; + u16 gphy_rev; + u32 num_gphy; + u8 num_acb_queue; + u8 num_rgmii; + u8 num_ports; + u8 fcb_pause_override:1; + u8 acb_packets_inflight:1; +}; + +#define BCM_SF2_REGS_NAME {\ + "core", "reg", "intrl2_0", "intrl2_1", "fcb", "acb" \ +} + +#define BCM_SF2_REGS_NUM 6 + +struct bcm_sf2_port_status { + unsigned int link; +}; + +struct bcm_sf2_priv { + /* Base registers, keep those in order with BCM_SF2_REGS_NAME */ + void __iomem *core; + void __iomem *reg; + void __iomem *intrl2_0; + void __iomem *intrl2_1; + void __iomem *fcb; + void __iomem *acb; + + /* spinlock protecting access to the indirect registers */ + spinlock_t indir_lock; + + int irq0; + int irq1; + u32 irq0_stat; + u32 irq0_mask; + u32 irq1_stat; + u32 irq1_mask; + + /* Mutex protecting access to the MIB counters */ + struct mutex stats_mutex; + + struct bcm_sf2_hw_params hw_params; + + struct bcm_sf2_port_status port_sts[DSA_MAX_PORTS]; + + /* Mask of ports enabled for Wake-on-LAN */ + u32 wol_ports_mask; +}; + +struct bcm_sf2_hw_stats { + const char *string; + u16 reg; + u8 sizeof_stat; +}; + +#define SF2_IO_MACRO(name) \ +static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \ +{ \ + return __raw_readl(priv->name + off); \ +} \ +static inline void name##_writel(struct bcm_sf2_priv *priv, \ + u32 val, u32 off) \ +{ \ + __raw_writel(val, priv->name + off); \ +} \ + +/* Accesses to 64-bit registers require us to latch the hi/lo pairs + * using the REG_DIR_DATA_{READ,WRITE} ancillary registers. The 'indir_lock' + * spinlock is automatically grabbed and released to provide relative + * atomicity with latched reads/writes. 
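+ * (The ordering below assumes the hardware latches the upper half + * whenever the lower half of a counter is accessed: low word first on + * reads, REG_DIR_DATA_WRITE first on writes.)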
+ */ +#define SF2_IO64_MACRO(name) \ +static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ +{ \ + u32 indir, dir; \ + spin_lock(&priv->indir_lock); \ + dir = __raw_readl(priv->name + off); \ + indir = reg_readl(priv, REG_DIR_DATA_READ); \ + spin_unlock(&priv->indir_lock); \ + return (u64)indir << 32 | dir; \ +} \ +static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off, \ + u64 val) \ +{ \ + spin_lock(&priv->indir_lock); \ + reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \ + __raw_writel(lower_32_bits(val), priv->name + off); \ + spin_unlock(&priv->indir_lock); \ +} + +#define SWITCH_INTR_L2(which) \ +static inline void intrl2_##which##_mask_clear(struct bcm_sf2_priv *priv, \ + u32 mask) \ +{ \ + intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \ + priv->irq##which##_mask &= ~(mask); \ +} \ +static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \ + u32 mask) \ +{ \ + intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \ + priv->irq##which##_mask |= (mask); \ +} \ + +SF2_IO_MACRO(core); +SF2_IO_MACRO(reg); +SF2_IO64_MACRO(core); +SF2_IO_MACRO(intrl2_0); +SF2_IO_MACRO(intrl2_1); +SF2_IO_MACRO(fcb); +SF2_IO_MACRO(acb); + +SWITCH_INTR_L2(0); +SWITCH_INTR_L2(1); + +#endif /* __BCM_SF2_H */ diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h new file mode 100644 index 000000000000..c65f138c777f --- /dev/null +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -0,0 +1,228 @@ +/* + * Broadcom Starfighter 2 switch register defines + * + * Copyright (C) 2014, Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ +#ifndef __BCM_SF2_REGS_H +#define __BCM_SF2_REGS_H + +/* Register set relative to 'REG' */ +#define REG_SWITCH_CNTRL 0x00 +#define MDIO_MASTER_SEL (1 << 0) + +#define REG_SWITCH_STATUS 0x04 +#define REG_DIR_DATA_WRITE 0x08 +#define REG_DIR_DATA_READ 0x0C + +#define REG_SWITCH_REVISION 0x18 +#define SF2_REV_MASK 0xffff +#define SWITCH_TOP_REV_SHIFT 16 +#define SWITCH_TOP_REV_MASK 0xffff + +#define REG_PHY_REVISION 0x1C +#define PHY_REVISION_MASK 0xffff + +#define REG_SPHY_CNTRL 0x2C +#define IDDQ_BIAS (1 << 0) +#define EXT_PWR_DOWN (1 << 1) +#define FORCE_DLL_EN (1 << 2) +#define IDDQ_GLOBAL_PWR (1 << 3) +#define CK25_DIS (1 << 4) +#define PHY_RESET (1 << 5) +#define PHY_PHYAD_SHIFT 8 +#define PHY_PHYAD_MASK 0x1F + +#define REG_RGMII_0_BASE 0x34 +#define REG_RGMII_CNTRL 0x00 +#define REG_RGMII_IB_STATUS 0x04 +#define REG_RGMII_RX_CLOCK_DELAY_CNTRL 0x08 +#define REG_RGMII_CNTRL_SIZE 0x0C +#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_BASE + \ + ((x) * REG_RGMII_CNTRL_SIZE)) +/* Relative to REG_RGMII_CNTRL */ +#define RGMII_MODE_EN (1 << 0) +#define ID_MODE_DIS (1 << 1) +#define PORT_MODE_SHIFT 2 +#define INT_EPHY (0 << PORT_MODE_SHIFT) +#define INT_GPHY (1 << PORT_MODE_SHIFT) +#define EXT_EPHY (2 << PORT_MODE_SHIFT) +#define EXT_GPHY (3 << PORT_MODE_SHIFT) +#define EXT_REVMII (4 << PORT_MODE_SHIFT) +#define PORT_MODE_MASK 0x7 +#define RVMII_REF_SEL (1 << 5) +#define RX_PAUSE_EN (1 << 6) +#define TX_PAUSE_EN (1 << 7) +#define TX_CLK_STOP_EN (1 << 8) +#define LPI_COUNT_SHIFT 9 +#define LPI_COUNT_MASK 0x3F + +/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */ +#define INTRL2_CPU_STATUS 0x00 +#define INTRL2_CPU_SET 0x04 +#define INTRL2_CPU_CLEAR 0x08 +#define INTRL2_CPU_MASK_STATUS 0x0c +#define INTRL2_CPU_MASK_SET 0x10 +#define INTRL2_CPU_MASK_CLEAR 0x14 + +/* Shared INTRL2_0 and INTRL2_ interrupt sources macros */ +#define P_LINK_UP_IRQ(x) (1 << (0 + (x))) +#define P_LINK_DOWN_IRQ(x) (1 << (1 + (x))) +#define P_ENERGY_ON_IRQ(x) (1 << (2 + (x))) +#define P_ENERGY_OFF_IRQ(x) (1 << (3 + (x))) +#define P_GPHY_IRQ(x) (1 << (4 + (x))) +#define P_NUM_IRQ 5 +#define P_IRQ_MASK(x) (P_LINK_UP_IRQ((x)) | \ + P_LINK_DOWN_IRQ((x)) | \ + P_ENERGY_ON_IRQ((x)) | \ + P_ENERGY_OFF_IRQ((x)) | \ + P_GPHY_IRQ((x))) + +/* INTRL2_0 interrupt sources */ +#define P0_IRQ_OFF 0 +#define MEM_DOUBLE_IRQ (1 << 5) +#define EEE_LPI_IRQ (1 << 6) +#define P5_CPU_WAKE_IRQ (1 << 7) +#define P8_CPU_WAKE_IRQ (1 << 8) +#define P7_CPU_WAKE_IRQ (1 << 9) +#define IEEE1588_IRQ (1 << 10) +#define MDIO_ERR_IRQ (1 << 11) +#define MDIO_DONE_IRQ (1 << 12) +#define GISB_ERR_IRQ (1 << 13) +#define UBUS_ERR_IRQ (1 << 14) +#define FAILOVER_ON_IRQ (1 << 15) +#define FAILOVER_OFF_IRQ (1 << 16) +#define TCAM_SOFT_ERR_IRQ (1 << 17) + +/* INTRL2_1 interrupt sources */ +#define P7_IRQ_OFF 0 +#define P_IRQ_OFF(x) ((6 - (x)) * P_NUM_IRQ) + +/* Register set relative to 'CORE' */ +#define CORE_G_PCTL_PORT0 0x00000 +#define CORE_G_PCTL_PORT(x) (CORE_G_PCTL_PORT0 + (x * 0x4)) +#define CORE_IMP_CTL 0x00020 +#define RX_DIS (1 << 0) +#define TX_DIS (1 << 1) +#define RX_BCST_EN (1 << 2) +#define RX_MCST_EN (1 << 3) +#define RX_UCST_EN (1 << 4) +#define G_MISTP_STATE_SHIFT 5 +#define G_MISTP_NO_STP (0 << G_MISTP_STATE_SHIFT) +#define G_MISTP_DIS_STATE (1 << G_MISTP_STATE_SHIFT) +#define G_MISTP_BLOCK_STATE (2 << G_MISTP_STATE_SHIFT) +#define G_MISTP_LISTEN_STATE (3 << G_MISTP_STATE_SHIFT) +#define G_MISTP_LEARN_STATE (4 << G_MISTP_STATE_SHIFT) +#define G_MISTP_FWD_STATE (5 << G_MISTP_STATE_SHIFT) +#define G_MISTP_STATE_MASK 0x7 + +#define CORE_SWMODE 
0x0002c +#define SW_FWDG_MODE (1 << 0) +#define SW_FWDG_EN (1 << 1) +#define RTRY_LMT_DIS (1 << 2) + +#define CORE_STS_OVERRIDE_IMP 0x00038 +#define GMII_SPEED_UP_2G (1 << 6) +#define MII_SW_OR (1 << 7) + +#define CORE_NEW_CTRL 0x00084 +#define IP_MC (1 << 0) +#define OUTRANGEERR_DISCARD (1 << 1) +#define INRANGEERR_DISCARD (1 << 2) +#define CABLE_DIAG_LEN (1 << 3) +#define OVERRIDE_AUTO_PD_WAR (1 << 4) +#define EN_AUTO_PD_WAR (1 << 5) +#define UC_FWD_EN (1 << 6) +#define MC_FWD_EN (1 << 7) + +#define CORE_SWITCH_CTRL 0x00088 +#define MII_DUMB_FWDG_EN (1 << 6) + +#define CORE_SFT_LRN_CTRL 0x000f8 +#define SW_LEARN_CNTL(x) (1 << (x)) + +#define CORE_STS_OVERRIDE_GMIIP_PORT(x) (0x160 + (x) * 4) +#define LINK_STS (1 << 0) +#define DUPLX_MODE (1 << 1) +#define SPEED_SHIFT 2 +#define SPEED_MASK 0x3 +#define RXFLOW_CNTL (1 << 4) +#define TXFLOW_CNTL (1 << 5) +#define SW_OVERRIDE (1 << 6) + +#define CORE_WATCHDOG_CTRL 0x001e4 +#define SOFTWARE_RESET (1 << 7) +#define EN_CHIP_RST (1 << 6) +#define EN_SW_RESET (1 << 4) + +#define CORE_LNKSTS 0x00400 +#define LNK_STS_MASK 0x1ff + +#define CORE_SPDSTS 0x00410 +#define SPDSTS_10 0 +#define SPDSTS_100 1 +#define SPDSTS_1000 2 +#define SPDSTS_SHIFT 2 +#define SPDSTS_MASK 0x3 + +#define CORE_DUPSTS 0x00420 +#define CORE_DUPSTS_MASK 0x1ff + +#define CORE_PAUSESTS 0x00428 +#define PAUSESTS_TX_PAUSE_SHIFT 9 + +#define CORE_GMNCFGCFG 0x0800 +#define RST_MIB_CNT (1 << 0) +#define RXBPDU_EN (1 << 1) + +#define CORE_IMP0_PRT_ID 0x0804 + +#define CORE_BRCM_HDR_CTRL 0x0080c +#define BRCM_HDR_EN_P8 (1 << 0) +#define BRCM_HDR_EN_P5 (1 << 1) +#define BRCM_HDR_EN_P7 (1 << 2) + +#define CORE_BRCM_HDR_CTRL2 0x0828 + +#define CORE_HL_PRTC_CTRL 0x0940 +#define ARP_EN (1 << 0) +#define RARP_EN (1 << 1) +#define DHCP_EN (1 << 2) +#define ICMPV4_EN (1 << 3) +#define ICMPV6_EN (1 << 4) +#define ICMPV6_FWD_MODE (1 << 5) +#define IGMP_DIP_EN (1 << 8) +#define IGMP_RPTLVE_EN (1 << 9) +#define IGMP_RTPLVE_FWD_MODE (1 << 10) +#define IGMP_QRY_EN (1 << 11) +#define IGMP_QRY_FWD_MODE (1 << 12) +#define IGMP_UKN_EN (1 << 13) +#define IGMP_UKN_FWD_MODE (1 << 14) +#define MLD_RPTDONE_EN (1 << 15) +#define MLD_RPTDONE_FWD_MODE (1 << 16) +#define MLD_QRY_EN (1 << 17) +#define MLD_QRY_FWD_MODE (1 << 18) + +#define CORE_RST_MIB_CNT_EN 0x0950 + +#define CORE_BRCM_HDR_RX_DIS 0x0980 +#define CORE_BRCM_HDR_TX_DIS 0x0988 + +#define CORE_MEM_PSM_VDD_CTRL 0x2380 +#define P_TXQ_PSM_VDD_SHIFT 2 +#define P_TXQ_PSM_VDD_MASK 0x3 +#define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \ + ((x) * P_TXQ_PSM_VDD_SHIFT)) + +#define CORE_P0_MIB_OFFSET 0x8000 +#define P_MIB_SIZE 0x400 +#define CORE_P_MIB_OFFSET(x) (CORE_P0_MIB_OFFSET + (x) * P_MIB_SIZE) + +#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) +#define PORT_VLAN_CTRL_MASK 0x1ff + +#endif /* __BCM_SF2_REGS_H */ diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index 7a54ec04b418..776e965dc9f4 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c @@ -21,7 +21,8 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) { - return mdiobus_read(ds->master_mii_bus, ds->pd->sw_addr + addr, reg); + return mdiobus_read(to_mii_bus(ds->master_dev), + ds->pd->sw_addr + addr, reg); } #define REG_READ(addr, reg) \ @@ -37,8 +38,8 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg) static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) { - return mdiobus_write(ds->master_mii_bus, ds->pd->sw_addr + addr, - reg, val); + return mdiobus_write(to_mii_bus(ds->master_dev), + 
ds->pd->sw_addr + addr, reg, val); } #define REG_WRITE(addr, reg, val) \ @@ -50,10 +51,14 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) return __ret; \ }) -static char *mv88e6060_probe(struct mii_bus *bus, int sw_addr) +static char *mv88e6060_probe(struct device *host_dev, int sw_addr) { + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int ret; + if (bus == NULL) + return NULL; + ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); if (ret >= 0) { ret &= 0xfff0; @@ -258,7 +263,7 @@ static void mv88e6060_poll_link(struct dsa_switch *ds) } static struct dsa_switch_driver mv88e6060_switch_driver = { - .tag_protocol = htons(ETH_P_TRAILER), + .tag_protocol = DSA_TAG_PROTO_TRAILER, .probe = mv88e6060_probe, .setup = mv88e6060_setup, .set_addr = mv88e6060_set_addr, diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c index 69c42513dd72..a332c53ff955 100644 --- a/drivers/net/dsa/mv88e6123_61_65.c +++ b/drivers/net/dsa/mv88e6123_61_65.c @@ -17,10 +17,14 @@ #include <net/dsa.h> #include "mv88e6xxx.h" -static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr) +static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr) { + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int ret; + if (bus == NULL) + return NULL; + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); if (ret >= 0) { if (ret == 0x1212) @@ -207,7 +211,7 @@ static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) */ val = 0x0433; if (dsa_is_cpu_port(ds, p)) { - if (ds->dst->tag_protocol == htons(ETH_P_EDSA)) + if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) val |= 0x3300; else val |= 0x0100; @@ -391,7 +395,7 @@ static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds) } struct dsa_switch_driver mv88e6123_61_65_switch_driver = { - .tag_protocol = cpu_to_be16(ETH_P_EDSA), + .tag_protocol = DSA_TAG_PROTO_EDSA, .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6123_61_65_probe, .setup = mv88e6123_61_65_setup, diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index 953bc6a49e59..244c735014fa 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -22,10 +22,14 @@ #define ID_6095 0x0950 #define ID_6131 0x1060 -static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr) +static char *mv88e6131_probe(struct device *host_dev, int sw_addr) { + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); int ret; + if (bus == NULL) + return NULL; + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); if (ret >= 0) { ret &= 0xfff0; @@ -379,7 +383,7 @@ static int mv88e6131_get_sset_count(struct dsa_switch *ds) } struct dsa_switch_driver mv88e6131_switch_driver = { - .tag_protocol = cpu_to_be16(ETH_P_DSA), + .tag_protocol = DSA_TAG_PROTO_DSA, .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6131_probe, .setup = mv88e6131_setup, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c new file mode 100644 index 000000000000..6365e30138af --- /dev/null +++ b/drivers/net/dsa/mv88e6171.c @@ -0,0 +1,411 @@ +/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support + * Copyright (c) 2008-2009 Marvell Semiconductor + * Copyright (c) 2014 Claudio Leite <leitec@staticky.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <net/dsa.h> +#include "mv88e6xxx.h" + +static char *mv88e6171_probe(struct device *host_dev, int sw_addr) +{ + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); + int ret; + + if (bus == NULL) + return NULL; + + ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); + if (ret >= 0) { + if ((ret & 0xfff0) == 0x1710) + return "Marvell 88E6171"; + } + + return NULL; +} + +static int mv88e6171_switch_reset(struct dsa_switch *ds) +{ + int i; + int ret; + unsigned long timeout; + + /* Set all ports to the disabled state. */ + for (i = 0; i < 8; i++) { + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + } + + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); + + /* Reset the switch. */ + REG_WRITE(REG_GLOBAL, 0x04, 0xc400); + + /* Wait up to one second for reset to complete. */ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = REG_READ(REG_GLOBAL, 0x00); + if ((ret & 0xc800) == 0xc800) + break; + + usleep_range(1000, 2000); + } + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + + /* Enable ports not under DSA, e.g. WAN port */ + for (i = 0; i < 8; i++) { + if (dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)) + continue; + + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret | 0x03); + } + + return 0; +} + +static int mv88e6171_setup_global(struct dsa_switch *ds) +{ + int ret; + int i; + + /* Disable the PHY polling unit (since there won't be any + * external PHYs to poll), don't discard packets with + * excessive collisions, and mask all interrupt sources. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0x0000); + + /* Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); + + /* Configure the priority mapping registers. */ + ret = mv88e6xxx_config_prio(ds); + if (ret < 0) + return ret; + + /* Configure the upstream port, and configure the upstream + * port as the port to which ingress and egress monitor frames + * are to be sent. + */ + if (REG_READ(REG_PORT(0), 0x03) == 0x1710) + REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1111)); + else + REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); + + /* Disable remote management for now, and set the switch's + * DSA device number. + */ + REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:2x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); + + /* Send all frames with destination addresses matching + * 01:80:c2:00:00:0x to the CPU port. + */ + REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); + + /* Disable the loopback filter, disable flow control + * messages, disable flood broadcast override, disable + * removing of provider tags, disable ATU age violation + * interrupts, disable tag flow control, force flow + * control priority to the highest, and send all special + * multicast frames to the CPU at the highest priority. + */ + REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); + + /* Program the DSA routing table. */ + for (i = 0; i < 32; i++) { + int nexthop; + + nexthop = 0x1f; + if (i != ds->index && i < ds->dst->pd->nr_chips) + nexthop = ds->pd->rtable[i] & 0x1f; + + REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); + } + + /* Clear all trunk masks. 
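+ * Each of the eight mask registers is rewritten with all eight port + * bits set, so no port is excluded from any trunk.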
*/ + for (i = 0; i < 8; i++) + REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff); + + /* Clear all trunk mappings. */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); + + /* Disable ingress rate limiting by resetting all ingress + * rate limit registers to their initial state. + */ + for (i = 0; i < 6; i++) + REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); + + /* Initialise cross-chip port VLAN table to reset defaults. */ + REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); + + /* Clear the priority override table. */ + for (i = 0; i < 16; i++) + REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); + + /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */ + + return 0; +} + +static int mv88e6171_setup_port(struct dsa_switch *ds, int p) +{ + int addr = REG_PORT(p); + u16 val; + + /* MAC Forcing register: don't force link, speed, duplex + * or flow control state to any particular values on physical + * ports, but force the CPU port and all DSA ports to 1000 Mb/s + * full duplex. + */ + val = REG_READ(addr, 0x01); + if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) + REG_WRITE(addr, 0x01, val | 0x003e); + else + REG_WRITE(addr, 0x01, val | 0x0003); + + /* Do not limit the period of time that this port can be + * paused for by the remote end or the period of time that + * this port can pause the remote end. + */ + REG_WRITE(addr, 0x02, 0x0000); + + /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, + * disable Header mode, enable IGMP/MLD snooping, disable VLAN + * tunneling, determine priority by looking at 802.1p and IP + * priority fields (IP prio has precedence), and set STP state + * to Forwarding. + * + * If this is the CPU link, use DSA or EDSA tagging depending + * on which tagging mode was configured. + * + * If this is a link to another switch, use DSA tagging mode. + * + * If this is the upstream port for this switch, enable + * forwarding of unknown unicasts and multicasts. + */ + val = 0x0433; + if (dsa_is_cpu_port(ds, p)) { + if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA) + val |= 0x3300; + else + val |= 0x0100; + } + if (ds->dsa_port_mask & (1 << p)) + val |= 0x0100; + if (p == dsa_upstream_port(ds)) + val |= 0x000c; + REG_WRITE(addr, 0x04, val); + + /* Port Control 1: disable trunking. Also, if this is the + * CPU port, enable learn messages to be sent to this port. + */ + REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); + + /* Port based VLAN map: give each port its own address + * database, allow the CPU port to talk to each of the 'real' + * ports, and allow each of the 'real' ports to only talk to + * the upstream port. + */ + val = (p & 0xf) << 12; + if (dsa_is_cpu_port(ds, p)) + val |= ds->phys_port_mask; + else + val |= 1 << dsa_upstream_port(ds); + REG_WRITE(addr, 0x06, val); + + /* Default VLAN ID and priority: don't set a default VLAN + * ID, and set the default packet priority to zero. + */ + REG_WRITE(addr, 0x07, 0x0000); + + /* Port Control 2: don't force a good FCS, set the maximum + * frame size to 10240 bytes, don't let the switch add or + * strip 802.1q tags, don't discard tagged or untagged frames + * on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't + * send a copy of all transmitted/received frames on this port + * to the CPU. + */ + REG_WRITE(addr, 0x08, 0x2080); + + /* Egress rate control: disable egress rate control. */ + REG_WRITE(addr, 0x09, 0x0001); + + /* Egress rate control 2: disable egress rate control. 
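+ * (register 0x0a is simply written back to zero).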
*/ + REG_WRITE(addr, 0x0a, 0x0000); + + /* Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + REG_WRITE(addr, 0x0b, 1 << p); + + /* Port ATU control: disable limiting the number of address + * database entries that this port is allowed to use. + */ + REG_WRITE(addr, 0x0c, 0x0000); + + /* Priority Override: disable DA, SA and VTU priority override. */ + REG_WRITE(addr, 0x0d, 0x0000); + + /* Port Ethertype: use the Ethertype DSA Ethertype value. */ + REG_WRITE(addr, 0x0f, ETH_P_EDSA); + + /* Tag Remap: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x18, 0x3210); + + /* Tag Remap 2: use an identity 802.1p prio -> switch prio + * mapping. + */ + REG_WRITE(addr, 0x19, 0x7654); + + return 0; +} + +static int mv88e6171_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); + int i; + int ret; + + mutex_init(&ps->smi_mutex); + mutex_init(&ps->stats_mutex); + + ret = mv88e6171_switch_reset(ds); + if (ret < 0) + return ret; + + /* @@@ initialise vtu and atu */ + + ret = mv88e6171_setup_global(ds); + if (ret < 0) + return ret; + + for (i = 0; i < 8; i++) { + if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i))) + continue; + + ret = mv88e6171_setup_port(ds, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6171_port_to_phy_addr(int port) +{ + if (port >= 0 && port <= 4) + return port; + return -1; +} + +static int +mv88e6171_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + int addr = mv88e6171_port_to_phy_addr(port); + + return mv88e6xxx_phy_read(ds, addr, regnum); +} + +static int +mv88e6171_phy_write(struct dsa_switch *ds, + int port, int regnum, u16 val) +{ + int addr = mv88e6171_port_to_phy_addr(port); + + return mv88e6xxx_phy_write(ds, addr, regnum, val); +} + +static struct mv88e6xxx_hw_stat mv88e6171_hw_stats[] = { + { "in_good_octets", 8, 0x00, }, + { "in_bad_octets", 4, 0x02, }, + { "in_unicast", 4, 0x04, }, + { "in_broadcasts", 4, 0x06, }, + { "in_multicasts", 4, 0x07, }, + { "in_pause", 4, 0x16, }, + { "in_undersize", 4, 0x18, }, + { "in_fragments", 4, 0x19, }, + { "in_oversize", 4, 0x1a, }, + { "in_jabber", 4, 0x1b, }, + { "in_rx_error", 4, 0x1c, }, + { "in_fcs_error", 4, 0x1d, }, + { "out_octets", 8, 0x0e, }, + { "out_unicast", 4, 0x10, }, + { "out_broadcasts", 4, 0x13, }, + { "out_multicasts", 4, 0x12, }, + { "out_pause", 4, 0x15, }, + { "excessive", 4, 0x11, }, + { "collisions", 4, 0x1e, }, + { "deferred", 4, 0x05, }, + { "single", 4, 0x14, }, + { "multiple", 4, 0x17, }, + { "out_fcs_error", 4, 0x03, }, + { "late", 4, 0x1f, }, + { "hist_64bytes", 4, 0x08, }, + { "hist_65_127bytes", 4, 0x09, }, + { "hist_128_255bytes", 4, 0x0a, }, + { "hist_256_511bytes", 4, 0x0b, }, + { "hist_512_1023bytes", 4, 0x0c, }, + { "hist_1024_max_bytes", 4, 0x0d, }, +}; + +static void +mv88e6171_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6171_hw_stats), + mv88e6171_hw_stats, port, data); +} + +static void +mv88e6171_get_ethtool_stats(struct dsa_switch *ds, + int port, uint64_t *data) +{ + mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6171_hw_stats), + mv88e6171_hw_stats, port, data); +} + +static int mv88e6171_get_sset_count(struct dsa_switch *ds) +{ + return ARRAY_SIZE(mv88e6171_hw_stats); +} + +struct dsa_switch_driver mv88e6171_switch_driver = { + .tag_protocol = 
DSA_TAG_PROTO_DSA, + .priv_size = sizeof(struct mv88e6xxx_priv_state), + .probe = mv88e6171_probe, + .setup = mv88e6171_setup, + .set_addr = mv88e6xxx_set_addr_indirect, + .phy_read = mv88e6171_phy_read, + .phy_write = mv88e6171_phy_write, + .poll_link = mv88e6xxx_poll_link, + .get_strings = mv88e6171_get_strings, + .get_ethtool_stats = mv88e6171_get_ethtool_stats, + .get_sset_count = mv88e6171_get_sset_count, +}; + +MODULE_ALIAS("platform:mv88e6171"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 9ce2146346b6..d6f6428b27dc 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -78,7 +78,7 @@ int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) int ret; mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_reg_read(ds->master_mii_bus, + ret = __mv88e6xxx_reg_read(to_mii_bus(ds->master_dev), ds->pd->sw_addr, addr, reg); mutex_unlock(&ps->smi_mutex); @@ -122,7 +122,7 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) int ret; mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_reg_write(ds->master_mii_bus, + ret = __mv88e6xxx_reg_write(to_mii_bus(ds->master_dev), ds->pd->sw_addr, addr, reg, val); mutex_unlock(&ps->smi_mutex); @@ -501,12 +501,18 @@ static int __init mv88e6xxx_init(void) #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) register_switch_driver(&mv88e6123_61_65_switch_driver); #endif +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) + register_switch_driver(&mv88e6171_switch_driver); +#endif return 0; } module_init(mv88e6xxx_init); static void __exit mv88e6xxx_cleanup(void) { +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171) + unregister_switch_driver(&mv88e6171_switch_driver); +#endif #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) unregister_switch_driver(&mv88e6123_61_65_switch_driver); #endif diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 911ede58dd12..5e5145ad9525 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -70,6 +70,7 @@ void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, extern struct dsa_switch_driver mv88e6131_switch_driver; extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; +extern struct dsa_switch_driver mv88e6171_switch_driver; #define REG_READ(addr, reg) \ ({ \ diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index a968654b631d..4547a1b8b958 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -695,9 +695,9 @@ el3_tx_timeout (struct net_device *dev) int ioaddr = dev->base_addr; /* Transmitter timeout, serious problems. */ - pr_warning("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d.\n", - dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), - inw(ioaddr + TX_FREE)); + pr_warn("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d\n", + dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), + inw(ioaddr + TX_FREE)); dev->stats.tx_errors++; dev->trans_start = jiffies; /* prevent tx timeout */ /* Issue TX_RESET and TX_START commands. 
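+ * The transmitter is fully reset and then re-enabled.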
*/ diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 94c656f5a05d..942fb0d5aace 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -515,7 +515,7 @@ static struct net_device *corkscrew_scan(int unit) if (pnp_device_attach(idev) < 0) continue; if (pnp_activate_dev(idev) < 0) { - pr_warning("pnp activate failed (out of resources?)\n"); + pr_warn("pnp activate failed (out of resources?)\n"); pnp_device_detach(idev); continue; } @@ -659,7 +659,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, pr_cont(", IRQ %d\n", dev->irq); /* Tell them about an invalid IRQ. */ if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15)) - pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n"); + pr_warn(" *** Warning: this IRQ is unlikely to work! ***\n"); { static const char * const ram_split[] = { @@ -967,13 +967,13 @@ static void corkscrew_timeout(struct net_device *dev) struct corkscrew_private *vp = netdev_priv(dev); int ioaddr = dev->base_addr; - pr_warning("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", - dev->name, inb(ioaddr + TxStatus), - inw(ioaddr + EL3_STATUS)); + pr_warn("%s: transmit timed out, tx_status %2.2x status %4.4x\n", + dev->name, inb(ioaddr + TxStatus), + inw(ioaddr + EL3_STATUS)); /* Slight code bloat to be user friendly. */ if ((inb(ioaddr + TxStatus) & 0x88) == 0x88) - pr_warning("%s: Transmitter encountered 16 collisions --" - " network cable problem?\n", dev->name); + pr_warn("%s: Transmitter encountered 16 collisions -- network cable problem?\n", + dev->name); #ifndef final_version pr_debug(" Flags; bus-master %d, full %d; dirty %d current %d.\n", vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, @@ -1382,13 +1382,10 @@ static int boomerang_rx(struct net_device *dev) temp = skb_put(skb, pkt_len); /* Remove this checking code for final release. */ if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp) - pr_warning("%s: Warning -- the skbuff addresses do not match" - " in boomerang_rx: %p vs. %p / %p.\n", - dev->name, - isa_bus_to_virt(vp-> - rx_ring[entry]. - addr), skb->head, - temp); + pr_warn("%s: Warning -- the skbuff addresses do not match in boomerang_rx: %p vs. %p / %p\n", + dev->name, + isa_bus_to_virt(vp->rx_ring[entry].addr), + skb->head, temp); rx_nocopy++; } skb->protocol = eth_type_trans(skb, dev); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 8ca49f04acec..86e621142d5b 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1310,8 +1310,8 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, pr_cont(", IRQ %d\n", dev->irq); /* Tell them about an invalid IRQ. */ if (dev->irq <= 0 || dev->irq >= nr_irqs) - pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n", - dev->irq); + pr_warn(" *** Warning: IRQ %d is unlikely to work! 
***\n", + dev->irq); step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1; if (print_info) { @@ -1425,7 +1425,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, } mii_preamble_required--; if (phy_idx == 0) { - pr_warning(" ***WARNING*** No MII transceivers found!\n"); + pr_warn(" ***WARNING*** No MII transceivers found!\n"); vp->phys[0] = 24; } else { vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE); @@ -1566,8 +1566,7 @@ vortex_up(struct net_device *dev) pci_restore_state(VORTEX_PCI(vp)); err = pci_enable_device(VORTEX_PCI(vp)); if (err) { - pr_warning("%s: Could not enable device\n", - dev->name); + pr_warn("%s: Could not enable device\n", dev->name); goto err_out; } } @@ -2007,8 +2006,8 @@ vortex_error(struct net_device *dev, int status) /* This occurs when we have the wrong media type! */ if (DoneDidThat == 0 && ioread16(ioaddr + EL3_STATUS) & StatsFull) { - pr_warning("%s: Updating statistics failed, disabling " - "stats as an interrupt source.\n", dev->name); + pr_warn("%s: Updating statistics failed, disabling stats as an interrupt source\n", + dev->name); iowrite16(SetIntrEnb | (window_read16(vp, 5, 10) & ~StatsFull), ioaddr + EL3_CMD); @@ -2148,8 +2147,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) { if (vortex_debug > 0) - pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n", - dev->name); + pr_warn("%s: BUG! Tx Ring full, refusing to send buffer\n", + dev->name); netif_stop_queue(dev); return NETDEV_TX_BUSY; } @@ -2343,7 +2342,7 @@ vortex_interrupt(int irq, void *dev_id) } if (--work_done < 0) { - pr_warning("%s: Too much work in interrupt, status %4.4x.\n", + pr_warn("%s: Too much work in interrupt, status %4.4x\n", dev->name, status); /* Disable all pending interrupts. */ do { @@ -2476,7 +2475,7 @@ boomerang_interrupt(int irq, void *dev_id) vortex_error(dev, status); if (--work_done < 0) { - pr_warning("%s: Too much work in interrupt, status %4.4x.\n", + pr_warn("%s: Too much work in interrupt, status %4.4x\n", dev->name, status); /* Disable all pending interrupts. 
*/ do { @@ -2652,7 +2651,8 @@ boomerang_rx(struct net_device *dev) if (skb == NULL) { static unsigned long last_jif; if (time_after(jiffies, last_jif + 10 * HZ)) { - pr_warning("%s: memory shortage\n", dev->name); + pr_warn("%s: memory shortage\n", + dev->name); last_jif = jiffies; } if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) @@ -2751,7 +2751,8 @@ vortex_close(struct net_device *dev) if (vp->rx_csumhits && (vp->drv_flags & HAS_HWCKSM) == 0 && (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) { - pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name); + pr_warn("%s supports hardware checksums, and we're not using them!\n", + dev->name); } #endif diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 31c48a7ac2b6..6c323f4f457b 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1140,7 +1140,6 @@ static const struct net_device_ops au1000_netdev_ops = { static int au1000_probe(struct platform_device *pdev) { - static unsigned version_printed; struct au1000_private *aup = NULL; struct au1000_eth_platform_data *pd; struct net_device *dev = NULL; @@ -1371,9 +1370,8 @@ static int au1000_probe(struct platform_device *pdev) netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n", (unsigned long)base->start, irq); - if (version_printed++ == 0) - pr_info("%s version %s %s\n", - DRV_NAME, DRV_VERSION, DRV_AUTHOR); + + pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); return 0; diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index abf3b1581c82..5b22764ba88d 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -621,7 +621,7 @@ static int nmclan_config(struct pcmcia_device *link) ret = pcmcia_request_io(link); if (ret) goto failed; - ret = pcmcia_request_exclusive_irq(link, mace_interrupt); + ret = pcmcia_request_irq(link, mace_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index cc25a3a9e7cf..caade30820d5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -271,7 +271,6 @@ #define DMA_PBL_X8_DISABLE 0x00 #define DMA_PBL_X8_ENABLE 0x01 - /* MAC register offsets */ #define MAC_TCR 0x0000 #define MAC_RCR 0x0004 @@ -792,7 +791,6 @@ #define MTL_Q_DISABLED 0x00 #define MTL_Q_ENABLED 0x02 - /* MTL traffic class register offsets * Multiple traffic classes can be active. The first class has registers * that begin at 0x1100. Each subsequent queue has registers that @@ -815,7 +813,6 @@ #define MTL_TSA_SP 0x00 #define MTL_TSA_ETS 0x02 - /* PCS MMD select register offset * The MMD select register is used for accessing PCS registers * when the underlying APB3 interface is using indirect addressing. 
@@ -825,7 +822,6 @@ */ #define PCS_MMD_SELECT 0xff - /* Descriptor/Packet entry bit positions and sizes */ #define RX_PACKET_ERRORS_CRC_INDEX 2 #define RX_PACKET_ERRORS_CRC_WIDTH 1 @@ -929,7 +925,6 @@ #define MDIO_AN_COMP_STAT 0x0030 #endif - /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable @@ -957,7 +952,6 @@ do { \ ((0x1 << (_width)) - 1)) << (_index))); \ } while (0) - /* Bit setting and getting macros based on register fields * The get macro uses the bit field definitions formed using the input * names to extract the current bit field value from within the @@ -986,7 +980,6 @@ do { \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) - /* Macros for reading or writing registers * The ioread macros will get bit fields or full values using the * register definitions formed using the input names @@ -1014,7 +1007,6 @@ do { \ XGMAC_IOWRITE((_pdata), _reg, reg_val); \ } while (0) - /* Macros for reading or writing MTL queue or traffic class registers * Similar to the standard read and write macros except that the * base register value is calculated by the queue or traffic class number @@ -1041,7 +1033,6 @@ do { \ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \ } while (0) - /* Macros for reading or writing DMA channel registers * Similar to the standard read and write macros except that the * base register value is obtained from the ring @@ -1066,7 +1057,6 @@ do { \ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \ } while (0) - /* Macros for building, reading or writing register values or bits * within the register values of XPCS registers. */ @@ -1076,7 +1066,6 @@ do { \ #define XPCS_IOREAD(_pdata, _off) \ ioread32((_pdata)->xpcs_regs + (_off)) - /* Macros for building, reading or writing register values or bits * using MDIO. Different from above because of the use of standardized * Linux include values. 
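+ * (i.e. the MDIO_* register and field definitions from <linux/mdio.h>).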
No shifting is performed with the bit diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c index 7d6a49b24321..8a50b01c2686 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -120,7 +120,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static int xgbe_dcb_ieee_getets(struct net_device *netdev, struct ieee_ets *ets) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index a3c11355a34d..76479d04b903 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -121,7 +121,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static ssize_t xgbe_common_read(char __user *buffer, size_t count, loff_t *ppos, unsigned int value) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 1c5d62e8dab6..6fc5da01437d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -117,7 +117,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *); static void xgbe_free_ring(struct xgbe_prv_data *pdata, @@ -524,11 +523,8 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel) /* Allocate skb & assign to each rdesc */ skb = dev_alloc_skb(pdata->rx_buf_size); - if (skb == NULL) { - netdev_alert(pdata->netdev, - "failed to allocate skb\n"); + if (skb == NULL) break; - } skb_dma = dma_map_single(pdata->dev, skb->data, pdata->rx_buf_size, DMA_FROM_DEVICE); if (dma_mapping_error(pdata->dev, skb_dma)) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index ea273836d999..9da3a03e8c07 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -122,7 +122,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index b26d75856553..29554992215a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -126,7 +126,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static int xgbe_poll(struct napi_struct *, int); static void xgbe_set_rx_mode(struct net_device *); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 46f613028e9c..49508ec98b72 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -121,7 +121,6 @@ #include "xgbe.h" #include "xgbe-common.h" - struct xgbe_stats { char stat_string[ETH_GSTRING_LEN]; int stat_size; @@ -173,6 +172,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror), XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes), }; + #define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats) static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index bdf9cfa70e88..f5a8fa03921a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -128,7 +128,6 @@ #include "xgbe.h" #include "xgbe-common.h" - MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); MODULE_LICENSE("Dual BSD/GPL"); 
MODULE_VERSION(XGBE_DRV_VERSION); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 6d2221e023f4..363b210560f3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -123,7 +123,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg) { struct xgbe_prv_data *pdata = mii->priv; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c index 37e64cfa5718..a1bf9d1cdae1 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c @@ -122,7 +122,6 @@ #include "xgbe.h" #include "xgbe-common.h" - static cycle_t xgbe_cc_read(const struct cyclecounter *cc) { struct xgbe_prv_data *pdata = container_of(cc, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index e9fe6e6ddcc3..789957d43a13 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -128,7 +128,6 @@ #include <linux/net_tstamp.h> #include <net/dcbnl.h> - #define XGBE_DRV_NAME "amd-xgbe" #define XGBE_DRV_VERSION "1.0.0-a" #define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver" @@ -199,7 +198,6 @@ ((_ring)->rdata + \ ((_idx) & ((_ring)->rdesc_count - 1))) - /* Default coalescing parameters */ #define XGMAC_INIT_DMA_TX_USECS 50 #define XGMAC_INIT_DMA_TX_FRAMES 25 diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 514c57fd26f1..8e262e2b39b6 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -17,10 +17,14 @@ config NET_VENDOR_ARC if NET_VENDOR_ARC -config ARC_EMAC - tristate "ARC EMAC support" +config ARC_EMAC_CORE + tristate select MII select PHYLIB + +config ARC_EMAC + tristate "ARC EMAC support" + select ARC_EMAC_CORE depends on OF_IRQ depends on OF_NET ---help--- @@ -28,4 +32,14 @@ config ARC_EMAC non-standard on-chip ethernet device ARC EMAC 10/100 is used. Say Y here if you have such a board. If unsure, say N. +config EMAC_ROCKCHIP + tristate "Rockchip EMAC support" + select ARC_EMAC_CORE + depends on OF_IRQ && OF_NET && REGULATOR + ---help--- + Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. + This selects Rockchip SoC glue layer support for the + emac device driver. This driver is used for RK3066/RK3188 + EMAC ethernet controller. + endif # NET_VENDOR_ARC diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile index 00c8657637d5..79108af553fb 100644 --- a/drivers/net/ethernet/arc/Makefile +++ b/drivers/net/ethernet/arc/Makefile @@ -3,4 +3,6 @@ # arc_emac-objs := emac_main.o emac_mdio.o -obj-$(CONFIG_ARC_EMAC) += arc_emac.o +obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o +obj-$(CONFIG_ARC_EMAC) += emac_arc.o +obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index 36cc9bd07c47..dae1ac300a49 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -123,6 +123,10 @@ struct buffer_state { * @speed: PHY's last set speed. 
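+ * @drv_name: name of the driver, reported through ethtool + * @drv_version: version of the driver, reported through ethtool + * @set_mac_speed: optional platform hook called when the PHY speed changes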
*/ struct arc_emac_priv { + const char *drv_name; + const char *drv_version; + void (*set_mac_speed)(void *priv, unsigned int speed); + /* Devices */ struct device *dev; struct phy_device *phy_dev; @@ -204,7 +208,9 @@ static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask) arc_reg_set(priv, reg, value & ~mask); } -int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv); +int arc_mdio_probe(struct arc_emac_priv *priv); int arc_mdio_remove(struct arc_emac_priv *priv); +int arc_emac_probe(struct net_device *ndev, int interface); +int arc_emac_remove(struct net_device *ndev); #endif /* ARC_EMAC_H */ diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c new file mode 100644 index 000000000000..f9cb99bfb511 --- /dev/null +++ b/drivers/net/ethernet/arc/emac_arc.c @@ -0,0 +1,95 @@ +/** + * emac_arc.c - ARC EMAC specific glue layer + * + * Copyright (C) 2014 Romain Perier + * + * Romain Perier <romain.perier@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> + +#include "emac.h" + +#define DRV_NAME "emac_arc" +#define DRV_VERSION "1.0" + +static int emac_arc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *ndev; + struct arc_emac_priv *priv; + int interface, err; + + if (!dev->of_node) + return -ENODEV; + + ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); + if (!ndev) + return -ENOMEM; + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + + priv = netdev_priv(ndev); + priv->drv_name = DRV_NAME; + priv->drv_version = DRV_VERSION; + + interface = of_get_phy_mode(dev->of_node); + if (interface < 0) + interface = PHY_INTERFACE_MODE_MII; + + priv->clk = devm_clk_get(dev, "hclk"); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to retrieve host clock from device tree\n"); + err = -EINVAL; + goto out_netdev; + } + + err = arc_emac_probe(ndev, interface); +out_netdev: + if (err) + free_netdev(ndev); + return err; +} + +static int emac_arc_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + int err; + + err = arc_emac_remove(ndev); + free_netdev(ndev); + return err; +} + +static const struct of_device_id emac_arc_dt_ids[] = { + { .compatible = "snps,arc-emac" }, + { /* Sentinel */ } +}; + +static struct platform_driver emac_arc_driver = { + .probe = emac_arc_probe, + .remove = emac_arc_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = emac_arc_dt_ids, + }, +}; + +module_platform_driver(emac_arc_driver); + +MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>"); +MODULE_DESCRIPTION("ARC EMAC platform driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 5919394d9f58..abe1eabc0171 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -26,8 +26,6 @@ #include "emac.h" -#define DRV_NAME "arc_emac" -#define 
DRV_VERSION "1.0" /** * arc_emac_tx_avail - Return the number of available slots in the tx ring. @@ -61,6 +59,8 @@ static void arc_emac_adjust_link(struct net_device *ndev) if (priv->speed != phy_dev->speed) { priv->speed = phy_dev->speed; state_changed = 1; + if (priv->set_mac_speed) + priv->set_mac_speed(priv, priv->speed); } if (priv->duplex != phy_dev->duplex) { @@ -131,8 +131,10 @@ static int arc_emac_set_settings(struct net_device *ndev, static void arc_emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + struct arc_emac_priv *priv = netdev_priv(ndev); + + strlcpy(info->driver, priv->drv_name, sizeof(info->driver)); + strlcpy(info->version, priv->drv_version, sizeof(info->version)); } static const struct ethtool_ops arc_emac_ethtool_ops = { @@ -692,46 +694,38 @@ static const struct net_device_ops arc_emac_netdev_ops = { #endif }; -static int arc_emac_probe(struct platform_device *pdev) +int arc_emac_probe(struct net_device *ndev, int interface) { + struct device *dev = ndev->dev.parent; struct resource res_regs; struct device_node *phy_node; struct arc_emac_priv *priv; - struct net_device *ndev; const char *mac_addr; unsigned int id, clock_frequency, irq; int err; - if (!pdev->dev.of_node) - return -ENODEV; /* Get PHY from device tree */ - phy_node = of_parse_phandle(pdev->dev.of_node, "phy", 0); + phy_node = of_parse_phandle(dev->of_node, "phy", 0); if (!phy_node) { - dev_err(&pdev->dev, "failed to retrieve phy description from device tree\n"); + dev_err(dev, "failed to retrieve phy description from device tree\n"); return -ENODEV; } /* Get EMAC registers base address from device tree */ - err = of_address_to_resource(pdev->dev.of_node, 0, &res_regs); + err = of_address_to_resource(dev->of_node, 0, &res_regs); if (err) { - dev_err(&pdev->dev, "failed to retrieve registers base from device tree\n"); + dev_err(dev, "failed to retrieve registers base from device tree\n"); return -ENODEV; } /* Get IRQ from device tree */ - irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) { - dev_err(&pdev->dev, "failed to retrieve <irq> value from device tree\n"); + dev_err(dev, "failed to retrieve <irq> value from device tree\n"); return -ENODEV; } - ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); - if (!ndev) - return -ENOMEM; - - platform_set_drvdata(pdev, ndev); - SET_NETDEV_DEV(ndev, &pdev->dev); ndev->netdev_ops = &arc_emac_netdev_ops; ndev->ethtool_ops = &arc_emac_ethtool_ops; @@ -740,60 +734,57 @@ static int arc_emac_probe(struct platform_device *pdev) ndev->flags &= ~IFF_MULTICAST; priv = netdev_priv(ndev); - priv->dev = &pdev->dev; + priv->dev = dev; - priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); + priv->regs = devm_ioremap_resource(dev, &res_regs); if (IS_ERR(priv->regs)) { - err = PTR_ERR(priv->regs); - goto out_netdev; + return PTR_ERR(priv->regs); } - dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); + dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); - priv->clk = of_clk_get(pdev->dev.of_node, 0); - if (IS_ERR(priv->clk)) { - /* Get CPU clock frequency from device tree */ - if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", - &clock_frequency)) { - dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n"); - err = -EINVAL; - goto out_netdev; - } - } else { + if (priv->clk) { err = clk_prepare_enable(priv->clk); 
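One subtlety in the new set_mac_speed() hook: arc_emac_adjust_link() above passes the core's struct arc_emac_priv pointer as the void *priv argument, and the Rockchip glue further below casts that pointer directly to its own private structure. That is only well-defined because struct arc_emac_priv is placed as the first member of the glue structure, so the two pointers coincide. A hedged sketch of the layout contract (glue_priv and its members are illustrative, not part of the patch):

	struct glue_priv {			/* hypothetical glue layer */
		struct arc_emac_priv emac;	/* must remain the first member */
		void __iomem *speed_reg;	/* glue-specific state follows */
	};

	static void glue_set_mac_speed(void *priv, unsigned int speed)
	{
		/* valid only while 'emac' stays first in struct glue_priv */
		struct glue_priv *gp = priv;

		/* program the SoC-specific speed control via gp->speed_reg */
	}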
if (err) { - dev_err(&pdev->dev, "failed to enable clock\n"); - goto out_clkget; + dev_err(dev, "failed to enable clock\n"); + return err; } clock_frequency = clk_get_rate(priv->clk); + } else { + /* Get CPU clock frequency from device tree */ + if (of_property_read_u32(dev->of_node, "clock-frequency", + &clock_frequency)) { + dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n"); + return -EINVAL; + } } id = arc_reg_get(priv, R_ID); /* Check for EMAC revision 5 or 7, magic number */ if (!(id == 0x0005fd02 || id == 0x0007fd02)) { - dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); + dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id); err = -ENODEV; goto out_clken; } - dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); + dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id); /* Set poll rate so that it polls every 1 ms */ arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000); ndev->irq = irq; - dev_info(&pdev->dev, "IRQ is %d\n", ndev->irq); + dev_info(dev, "IRQ is %d\n", ndev->irq); /* Register interrupt handler for device */ - err = devm_request_irq(&pdev->dev, ndev->irq, arc_emac_intr, 0, + err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0, ndev->name, ndev); if (err) { - dev_err(&pdev->dev, "could not allocate IRQ\n"); + dev_err(dev, "could not allocate IRQ\n"); goto out_clken; } /* Get MAC address from device tree */ - mac_addr = of_get_mac_address(pdev->dev.of_node); + mac_addr = of_get_mac_address(dev->of_node); if (mac_addr) memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); @@ -801,14 +792,14 @@ static int arc_emac_probe(struct platform_device *pdev) eth_hw_addr_random(ndev); arc_emac_set_address_internal(ndev); - dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); + dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr); /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ - priv->rxbd = dmam_alloc_coherent(&pdev->dev, RX_RING_SZ + TX_RING_SZ, + priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ, &priv->rxbd_dma, GFP_KERNEL); if (!priv->rxbd) { - dev_err(&pdev->dev, "failed to allocate data buffers\n"); + dev_err(dev, "failed to allocate data buffers\n"); err = -ENOMEM; goto out_clken; } @@ -816,31 +807,31 @@ static int arc_emac_probe(struct platform_device *pdev) priv->txbd = priv->rxbd + RX_BD_NUM; priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ; - dev_dbg(&pdev->dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n", + dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n", (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma); - err = arc_mdio_probe(pdev, priv); + err = arc_mdio_probe(priv); if (err) { - dev_err(&pdev->dev, "failed to probe MII bus\n"); + dev_err(dev, "failed to probe MII bus\n"); goto out_clken; } priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + interface); if (!priv->phy_dev) { - dev_err(&pdev->dev, "of_phy_connect() failed\n"); + dev_err(dev, "of_phy_connect() failed\n"); err = -ENODEV; goto out_mdio; } - dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", + dev_info(dev, "connected to %s phy with id 0x%x\n", priv->phy_dev->drv->name, priv->phy_dev->phy_id); netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); err = register_netdev(ndev); if (err) { - dev_err(&pdev->dev, "failed to register network device\n"); + dev_err(dev, "failed to register network device\n"); goto out_netif_api; } @@ -853,19 +844,14 @@ out_netif_api: out_mdio: arc_mdio_remove(priv); out_clken: - if 
(!IS_ERR(priv->clk)) + if (priv->clk) clk_disable_unprepare(priv->clk); -out_clkget: - if (!IS_ERR(priv->clk)) - clk_put(priv->clk); -out_netdev: - free_netdev(ndev); return err; } +EXPORT_SYMBOL_GPL(arc_emac_probe); -static int arc_emac_remove(struct platform_device *pdev) +int arc_emac_remove(struct net_device *ndev) { - struct net_device *ndev = platform_get_drvdata(pdev); struct arc_emac_priv *priv = netdev_priv(ndev); phy_disconnect(priv->phy_dev); @@ -876,31 +862,12 @@ static int arc_emac_remove(struct platform_device *pdev) if (!IS_ERR(priv->clk)) { clk_disable_unprepare(priv->clk); - clk_put(priv->clk); } - free_netdev(ndev); return 0; } - -static const struct of_device_id arc_emac_dt_ids[] = { - { .compatible = "snps,arc-emac" }, - { /* Sentinel */ } -}; -MODULE_DEVICE_TABLE(of, arc_emac_dt_ids); - -static struct platform_driver arc_emac_driver = { - .probe = arc_emac_probe, - .remove = arc_emac_remove, - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - .of_match_table = arc_emac_dt_ids, - }, -}; - -module_platform_driver(arc_emac_driver); +EXPORT_SYMBOL_GPL(arc_emac_remove); MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>"); MODULE_DESCRIPTION("ARC EMAC driver"); diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 26ba2423f33a..d5ee986936da 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -100,7 +100,6 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr, /** * arc_mdio_probe - MDIO probe function. - * @pdev: Pointer to platform device. * @priv: Pointer to ARC EMAC private data structure. * * returns: 0 on success, -ENOMEM when mdiobus_alloc @@ -108,7 +107,7 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr, * * Sets up and registers the MDIO interface. */ -int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv) +int arc_mdio_probe(struct arc_emac_priv *priv) { struct mii_bus *bus; int error; @@ -124,9 +123,9 @@ int arc_mdio_probe(struct platform_device *pdev, struct arc_emac_priv *priv) bus->read = &arc_mdio_read; bus->write = &arc_mdio_write; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name); - error = of_mdiobus_register(bus, pdev->dev.of_node); + error = of_mdiobus_register(bus, priv->dev->of_node); if (error) { dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name); mdiobus_free(bus); diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c new file mode 100644 index 000000000000..c31c7407b753 --- /dev/null +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -0,0 +1,229 @@ +/** + * emac-rockchip.c - Rockchip EMAC specific glue layer + * + * Copyright (C) 2014 Romain Perier <romain.perier@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/etherdevice.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#include "emac.h" + +#define DRV_NAME "rockchip_emac" +#define DRV_VERSION "1.0" + +#define GRF_MODE_MII (1UL << 0) +#define GRF_MODE_RMII (0UL << 0) +#define GRF_SPEED_10M (0UL << 1) +#define GRF_SPEED_100M (1UL << 1) +#define GRF_SPEED_ENABLE_BIT (1UL << 17) +#define GRF_MODE_ENABLE_BIT (1UL << 16) + +struct emac_rockchip_soc_data { + int grf_offset; +}; + +struct rockchip_priv_data { + struct arc_emac_priv emac; + struct regmap *grf; + const struct emac_rockchip_soc_data *soc_data; + struct regulator *regulator; + struct clk *refclk; +}; + +static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) +{ + struct rockchip_priv_data *emac = priv; + u32 data; + int err = 0; + + /* write-enable bits */ + data = GRF_SPEED_ENABLE_BIT; + + switch(speed) { + case 10: + data |= GRF_SPEED_10M; + break; + case 100: + data |= GRF_SPEED_100M; + break; + default: + pr_err("speed %u not supported\n", speed); + return; + } + + err = regmap_write(emac->grf, emac->soc_data->grf_offset, data); + if (err) + pr_err("unable to apply speed %u to grf (%d)\n", speed, err); +} + +static const struct emac_rockchip_soc_data emac_rockchip_dt_data[] = { + { .grf_offset = 0x154 }, /* rk3066 */ + { .grf_offset = 0x0a4 }, /* rk3188 */ +}; + +static const struct of_device_id emac_rockchip_dt_ids[] = { + { .compatible = "rockchip,rk3066-emac", .data = &emac_rockchip_dt_data[0] }, + { .compatible = "rockchip,rk3188-emac", .data = &emac_rockchip_dt_data[1] }, + { /* Sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, emac_rockchip_dt_ids); + +static int emac_rockchip_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *ndev; + struct rockchip_priv_data *priv; + const struct of_device_id *match; + u32 data; + int err, interface; + + if (!pdev->dev.of_node) + return -ENODEV; + + ndev = alloc_etherdev(sizeof(struct rockchip_priv_data)); + if (!ndev) + return -ENOMEM; + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + + priv = netdev_priv(ndev); + priv->emac.drv_name = DRV_NAME; + priv->emac.drv_version = DRV_VERSION; + priv->emac.set_mac_speed = emac_rockchip_set_mac_speed; + + interface = of_get_phy_mode(dev->of_node); + + /* RK3066 and RK3188 SoCs only support RMII */ + if (interface != PHY_INTERFACE_MODE_RMII) { + dev_err(dev, "unsupported phy interface mode %d\n", interface); + err = -ENOTSUPP; + goto out_netdev; + } + + priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); + if (IS_ERR(priv->grf)) { + dev_err(dev, "failed to retrieve global register file (%ld)\n", PTR_ERR(priv->grf)); + err = PTR_ERR(priv->grf); + goto out_netdev; + } + + match = of_match_node(emac_rockchip_dt_ids, dev->of_node); + priv->soc_data = match->data; + + priv->emac.clk = devm_clk_get(dev, "hclk"); + if (IS_ERR(priv->emac.clk)) { + dev_err(dev, "failed to retrieve host clock (%ld)\n", PTR_ERR(priv->emac.clk)); + err = PTR_ERR(priv->emac.clk); + goto out_netdev; + } + + priv->refclk = devm_clk_get(dev, "macref"); + if (IS_ERR(priv->refclk)) { + dev_err(dev, "failed to retrieve reference clock (%ld)\n", PTR_ERR(priv->refclk)); + err = PTR_ERR(priv->refclk); + goto out_netdev; + } + + err = clk_prepare_enable(priv->refclk); + if (err) { + dev_err(dev, "failed to enable reference clock (%d)\n", err); + goto out_netdev; + } + + /* Optional 
regulator for PHY */ + priv->regulator = devm_regulator_get_optional(dev, "phy"); + if (IS_ERR(priv->regulator)) { + if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_err(dev, "no regulator found\n"); + priv->regulator = NULL; + } + + if (priv->regulator) { + err = regulator_enable(priv->regulator); + if (err) { + dev_err(dev, "failed to enable phy-supply (%d)\n", err); + goto out_clk_disable; + } + } + + err = arc_emac_probe(ndev, interface); + if (err) + goto out_regulator_disable; + + /* write-enable bits */ + data = GRF_MODE_ENABLE_BIT | GRF_SPEED_ENABLE_BIT; + + data |= GRF_SPEED_100M; + data |= GRF_MODE_RMII; + + err = regmap_write(priv->grf, priv->soc_data->grf_offset, data); + if (err) { + dev_err(dev, "unable to apply initial settings to grf (%d)\n", err); + goto out_regulator_disable; + } + + /* RMII interface needs always a rate of 50MHz */ + err = clk_set_rate(priv->refclk, 50000000); + if (err) + dev_err(dev, "failed to change reference clock rate (%d)\n", err); + return 0; + +out_regulator_disable: + if (priv->regulator) + regulator_disable(priv->regulator); +out_clk_disable: + clk_disable_unprepare(priv->refclk); +out_netdev: + free_netdev(ndev); + return err; +} + +static int emac_rockchip_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct rockchip_priv_data *priv = netdev_priv(ndev); + int err; + + err = arc_emac_remove(ndev); + + clk_disable_unprepare(priv->refclk); + + if (priv->regulator) + regulator_disable(priv->regulator); + + free_netdev(ndev); + return err; +} + +static struct platform_driver emac_rockchip_driver = { + .probe = emac_rockchip_probe, + .remove = emac_rockchip_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = emac_rockchip_dt_ids, + }, +}; + +module_platform_driver(emac_rockchip_driver); + +MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>"); +MODULE_DESCRIPTION("Rockchip EMAC platform driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index d8d07a818b89..c3e260c21734 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -122,6 +122,7 @@ config TIGON3 config BNX2X tristate "Broadcom NetXtremeII 10Gb support" depends on PCI + select PTP_1588_CLOCK select FW_LOADER select ZLIB_INFLATE select LIBCRC32C diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index d588136b23b9..416620fa8fac 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -427,7 +427,7 @@ static void b44_wap54g10_workaround(struct b44 *bp) } return; error: - pr_warning("PHY: cannot reset MII transceiver isolate bit\n"); + pr_warn("PHY: cannot reset MII transceiver isolate bit\n"); } #else static inline void b44_wap54g10_workaround(struct b44 *bp) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index d9b9170ed2fc..77f1ff7396ac 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -139,6 +139,15 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev, else reg &= ~RXCHK_SKIP_FCS; + /* If Broadcom tags are enabled (e.g: using a switch), make + * sure we tell the RXCHK hardware to expect a 4-bytes Broadcom + * tag after the Ethernet MAC Source Address. 
+ */ + if (netdev_uses_dsa(dev)) + reg |= RXCHK_BRCM_TAG_EN; + else + reg &= ~RXCHK_BRCM_TAG_EN; + rxchk_writel(priv, reg, RXCHK_CONTROL); return 0; @@ -1069,16 +1078,19 @@ static void bcm_sysport_adj_link(struct net_device *dev) if (!phydev->pause) cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; - if (changed) { + if (!changed) + return; + + if (phydev->link) { reg = umac_readl(priv, UMAC_CMD); reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | CMD_HD_EN | CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); reg |= cmd_bits; umac_writel(priv, reg, UMAC_CMD); - - phy_print_status(priv->phydev); } + + phy_print_status(priv->phydev); } static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d777fae86988..c3a6072134f5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -20,13 +20,17 @@ #include <linux/types.h> #include <linux/pci_regs.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/net_tstamp.h> +#include <linux/clocksource.h> + /* compilation time flags */ /* define this to make the driver freeze on error to allow getting debug info * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.78.19-0" +#define DRV_MODULE_VERSION "1.710.51-0" #define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 @@ -70,6 +74,7 @@ enum bnx2x_int_mode { #define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */ #define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */ #define BNX2X_MSG_IOV 0x0800000 +#define BNX2X_MSG_PTP 0x1000000 #define BNX2X_MSG_IDLE 0x2000000 /* used for idle check*/ #define BNX2X_MSG_ETHTOOL 0x4000000 #define BNX2X_MSG_DCB 0x8000000 @@ -1443,6 +1448,12 @@ struct bnx2x_fp_stats { struct bnx2x_eth_q_stats_old eth_q_stats_old; }; +enum { + SUB_MF_MODE_UNKNOWN = 0, + SUB_MF_MODE_UFP, + SUB_MF_MODE_NPAR1_DOT_5, +}; + struct bnx2x { /* Fields used in the tx and intr/napi performance paths * are grouped together in the beginning of the structure @@ -1587,10 +1598,11 @@ struct bnx2x { #define USING_SINGLE_MSIX_FLAG (1 << 20) #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define IS_VF_FLAG (1 << 22) -#define INTERRUPTS_ENABLED_FLAG (1 << 23) -#define BC_SUPPORTS_RMMOD_CMD (1 << 24) -#define HAS_PHYS_PORT_ID (1 << 25) -#define AER_ENABLED (1 << 26) +#define BC_SUPPORTS_RMMOD_CMD (1 << 23) +#define HAS_PHYS_PORT_ID (1 << 24) +#define AER_ENABLED (1 << 25) +#define PTP_SUPPORTED (1 << 26) +#define TX_TIMESTAMPING_EN (1 << 27) #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) @@ -1653,6 +1665,9 @@ struct bnx2x { #define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) #define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) #define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX) + u8 mf_sub_mode; +#define IS_MF_UFP(bp) (IS_MF_SD(bp) && \ + bp->mf_sub_mode == SUB_MF_MODE_UFP) u8 wol; @@ -1684,13 +1699,9 @@ struct bnx2x { #define BNX2X_STATE_ERROR 0xf000 #define BNX2X_MAX_PRIORITY 8 -#define BNX2X_MAX_ENTRIES_PER_PRI 16 -#define BNX2X_MAX_COS 3 -#define BNX2X_MAX_TX_COS 2 int num_queues; uint num_ethernet_queues; uint num_cnic_queues; - int num_napi_queues; int disable_tpa; u32 rx_mode; @@ -1933,6 +1944,19 @@ struct bnx2x { u8 phys_port_id[ETH_ALEN]; + /* PTP related context */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + struct work_struct ptp_task; + struct cyclecounter cyclecounter; + struct timecounter timecounter; + bool 
timecounter_init_done; + struct sk_buff *ptp_tx_skb; + unsigned long ptp_tx_start; + bool hwtstamp_ioctl_called; + u16 tx_type; + u16 rx_filter; + struct bnx2x_link_report_data vf_link_vars; }; @@ -2346,7 +2370,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, #define ATTN_HARD_WIRED_MASK 0xff00 #define ATTENTION_ID 4 -#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_SD(bp) || \ +#define IS_MF_STORAGE_ONLY(bp) (IS_MF_STORAGE_PERSONALITY_ONLY(bp) || \ IS_MF_FCOE_AFEX(bp)) /* stuff added to make the code fit 80Col */ @@ -2522,14 +2546,44 @@ void bnx2x_notify_link_changed(struct bnx2x *bp); #define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) #define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) +#define IS_MF_ISCSI_SI(bp) (IS_MF_SI(bp) && BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp)) + +#define IS_MF_ISCSI_ONLY(bp) (IS_MF_ISCSI_SD(bp) || IS_MF_ISCSI_SI(bp)) + +#define BNX2X_MF_EXT_PROTOCOL_MASK \ + (MACP_FUNC_CFG_FLAGS_ETHERNET | \ + MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD | \ + MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define BNX2X_MF_EXT_PROT(bp) ((bp)->mf_ext_config & \ + BNX2X_MF_EXT_PROTOCOL_MASK) -#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \ - MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) +#define BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp) \ + (BNX2X_MF_EXT_PROT(bp) & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp) \ + (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + +#define BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) \ + (BNX2X_MF_EXT_PROT(bp) == MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) + +#define IS_MF_FCOE_AFEX(bp) \ + (IS_MF_AFEX(bp) && BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp)) + +#define IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) \ + (IS_MF_SD(bp) && \ + (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ + BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) + +#define IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp) \ + (IS_MF_SI(bp) && \ + (BNX2X_IS_MF_EXT_PROTOCOL_ISCSI(bp) || \ + BNX2X_IS_MF_EXT_PROTOCOL_FCOE(bp))) + +#define IS_MF_STORAGE_PERSONALITY_ONLY(bp) \ + (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) || \ + IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp)) -#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp)) -#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ - (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ - BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) #define SET_FLAG(value, mask, flag) \ do {\ @@ -2559,4 +2613,11 @@ void bnx2x_update_mng_version(struct bnx2x *bp); #define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX)) +void bnx2x_init_ptp(struct bnx2x *bp); +int bnx2x_configure_ptp_filters(struct bnx2x *bp); +void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb); + +#define BNX2X_MAX_PHC_DRIFT 31000000 +#define BNX2X_PTP_TX_TIMEOUT + #endif /* bnx2x.h */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4ccc806b1150..40beef5bca88 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -21,6 +21,7 @@ #include <linux/if_vlan.h> #include <linux/interrupt.h> #include <linux/ip.h> +#include <linux/crash_dump.h> #include <net/tcp.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> @@ -64,7 +65,7 @@ static int bnx2x_calc_num_queues(struct bnx2x *bp) int nq = bnx2x_num_queues ? 
: netif_get_num_default_rss_queues(); /* Reduce memory usage in kdump environment by using only one queue */ - if (reset_devices) + if (is_kdump_kernel()) nq = 1; nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp)); @@ -1063,6 +1064,11 @@ reuse_rx: skb_record_rx_queue(skb, fp->rx_queue); + /* Check if this packet was timestamped */ + if (unlikely(cqe->fast_path_cqe.type_error_flags & + (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT))) + bnx2x_set_rx_ts(bp, skb); + if (le16_to_cpu(cqe_fp->pars_flags.flags) & PARSING_FLAGS_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), @@ -1932,7 +1938,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp) bp->num_ethernet_queues = bnx2x_calc_num_queues(bp); /* override in STORAGE SD modes */ - if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) + if (IS_MF_STORAGE_ONLY(bp)) bp->num_ethernet_queues = 1; /* Add special queues */ @@ -2078,6 +2084,10 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); if (rss_obj->udp_rss_v6) __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); + + if (!CHIP_IS_E1x(bp)) + /* valid only for TUNN_MODE_GRE tunnel mode */ + __set_bit(BNX2X_RSS_GRE_INNER_HDRS, ¶ms.rss_flags); } else { __set_bit(BNX2X_RSS_MODE_DISABLED, ¶ms.rss_flags); } @@ -2800,7 +2810,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Initialize Rx filter. */ bnx2x_set_rx_mode_inner(bp); - /* Start the Tx */ + if (bp->flags & PTP_SUPPORTED) { + bnx2x_init_ptp(bp); + bnx2x_configure_ptp_filters(bp); + } + /* Start Tx */ switch (load_mode) { case LOAD_NORMAL: /* Tx queue should be only re-enabled */ @@ -3437,26 +3451,6 @@ exit_lbl: } #endif -static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, - u32 xmit_type) -{ - struct ipv6hdr *ipv6; - - *parsing_data |= (skb_shinfo(skb)->gso_size << - ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & - ETH_TX_PARSE_BD_E2_LSO_MSS; - - if (xmit_type & XMIT_GSO_ENC_V6) - ipv6 = inner_ipv6_hdr(skb); - else if (xmit_type & XMIT_GSO_V6) - ipv6 = ipv6_hdr(skb); - else - ipv6 = NULL; - - if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6) - *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; -} - /** * bnx2x_set_pbd_gso - update PBD in GSO case. 
* @@ -3466,7 +3460,6 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, */ static void bnx2x_set_pbd_gso(struct sk_buff *skb, struct eth_tx_parse_bd_e1x *pbd, - struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); @@ -3479,9 +3472,6 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb, bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - - /* GSO on 57710/57711 needs FW to calculate IP checksum */ - tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; } else { pbd->tcp_pseudo_csum = bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, @@ -3653,18 +3643,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, (__force u32)iph->tot_len - (__force u32)iph->frag_off; + outerip_len = iph->ihl << 1; + pbd2->fw_ip_csum_wo_len_flags_frag = bswab16(csum_fold((__force __wsum)csum)); } else { pbd2->fw_ip_hdr_to_payload_w = hlen_w - ((sizeof(struct ipv6hdr)) >> 1); + pbd_e2->data.tunnel_data.flags |= + ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER; } pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq); pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb)); - if (xmit_type & XMIT_GSO_V4) { + /* inner IP header info */ + if (xmit_type & XMIT_CSUM_ENC_V4) { pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id); pbd_e2->data.tunnel_data.pseudo_csum = @@ -3672,8 +3667,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, inner_ip_hdr(skb)->saddr, inner_ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - - outerip_len = ip_hdr(skb)->ihl << 1; } else { pbd_e2->data.tunnel_data.pseudo_csum = bswab16(~csum_ipv6_magic( @@ -3686,8 +3679,6 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, *global_data |= outerip_off | - (!!(xmit_type & XMIT_CSUM_V6) << - ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) | (outerip_len << ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) << @@ -3699,6 +3690,23 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb, } } +static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) +{ + struct ipv6hdr *ipv6; + + if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6))) + return; + + if (xmit_type & XMIT_GSO_ENC_V6) + ipv6 = inner_ipv6_hdr(skb); + else /* XMIT_GSO_V6 */ + ipv6 = ipv6_hdr(skb); + + if (ipv6->nexthdr == NEXTHDR_IPV6) + *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; +} + /* called with netif_tx_lock * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call * netif_wake_queue() @@ -3831,6 +3839,20 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + if (!(bp->flags & TX_TIMESTAMPING_EN)) { + BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n"); + } else if (bp->ptp_tx_skb) { + BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + } else { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + /* schedule check for Tx timestamp */ + bp->ptp_tx_skb = skb_get(skb); + bp->ptp_tx_start = jiffies; + schedule_work(&bp->ptp_task); + } + } + /* header nbd: indirectly zero other flags! 
*/ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT; @@ -3852,12 +3874,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ +#ifndef BNX2X_STOP_ON_ERROR if (IS_VF(bp)) +#endif tx_start_bd->vlan_or_ethertype = cpu_to_le16(ntohs(eth->h_proto)); +#ifndef BNX2X_STOP_ON_ERROR else /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); +#endif } nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */ @@ -3915,6 +3941,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) xmit_type); } + bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type); /* Add the macs to the parsing BD if this is a vf or if * Tx Switching is enabled. */ @@ -3929,11 +3956,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) &pbd_e2->data.mac_addr.dst_mid, &pbd_e2->data.mac_addr.dst_lo, eth->h_dest); - } else if (bp->flags & TX_SWITCHING) { - bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, - &pbd_e2->data.mac_addr.dst_mid, - &pbd_e2->data.mac_addr.dst_lo, - eth->h_dest); + } else { + if (bp->flags & TX_SWITCHING) + bnx2x_set_fw_mac_addr( + &pbd_e2->data.mac_addr.dst_hi, + &pbd_e2->data.mac_addr.dst_mid, + &pbd_e2->data.mac_addr.dst_lo, + eth->h_dest); +#ifdef BNX2X_STOP_ON_ERROR + /* Enforce security is always set in Stop on Error - + * source mac should be present in the parsing BD + */ + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, + &pbd_e2->data.mac_addr.src_mid, + &pbd_e2->data.mac_addr.src_lo, + eth->h_source); +#endif } SET_FLAG(pbd_e2_parsing_data, @@ -3980,10 +4018,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) bd_prod); } if (!CHIP_IS_E1x(bp)) - bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, - xmit_type); + pbd_e2_parsing_data |= + (skb_shinfo(skb)->gso_size << + ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & + ETH_TX_PARSE_BD_E2_LSO_MSS; else - bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type); + bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); } /* Set the PBD's parsing_data field if not zero @@ -4191,14 +4231,13 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p) struct bnx2x *bp = netdev_priv(dev); int rc = 0; - if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) { + if (!is_valid_ether_addr(addr->sa_data)) { BNX2X_ERR("Requested MAC address is not valid\n"); return -EINVAL; } - if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) && - !is_zero_ether_addr(addr->sa_data)) { - BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); + if (IS_MF_STORAGE_ONLY(bp)) { + BNX2X_ERR("Can't change address on STORAGE ONLY function\n"); return -EINVAL; } @@ -4377,8 +4416,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) u8 cos; int rx_ring_size = 0; - if (!bp->rx_ring_size && - (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { + if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) { rx_ring_size = MIN_RX_SIZE_NONTPA; bp->rx_ring_size = rx_ring_size; } else if (!bp->rx_ring_size) { @@ -4771,11 +4809,15 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); /* TPA requires Rx CSUM offloading */ - if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) { + if (!(features & NETIF_F_RXCSUM)) { features &= ~NETIF_F_LRO; features &= ~NETIF_F_GRO; } + /* Note: do not disable SW GRO in kernel when HW GRO is off */ + if (bp->disable_tpa) + features &= ~NETIF_F_LRO; + 
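The GRO hunks just above and in the set_features change below encode a single design decision: NETIF_F_GRO also gates the stack's software GRO, so a device whose hardware TPA is disabled must leave the flag untouched (clearing it would silently disable software GRO as well), and toggling it in that state should not force a NIC reload. A sketch of the ndo_fix_features contract being relied on, condensed from the surrounding bnx2x code:

	/* .ndo_fix_features may only reduce the requested feature set.
	 * LRO needs both Rx checksum offload and hardware TPA; GRO needs
	 * Rx checksum offload alone, since its software path needs no TPA.
	 */
	static netdev_features_t sketch_fix_features(struct net_device *dev,
						     netdev_features_t features)
	{
		struct bnx2x *bp = netdev_priv(dev);

		if (!(features & NETIF_F_RXCSUM))
			features &= ~(NETIF_F_LRO | NETIF_F_GRO);
		if (bp->disable_tpa)
			features &= ~NETIF_F_LRO;	/* keep software GRO usable */
		return features;
	}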
return features; } @@ -4814,6 +4856,10 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features) if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) changes &= ~GRO_ENABLE_FLAG; + /* if GRO is changed while HW TPA is off, don't force a reload */ + if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa) + changes &= ~GRO_ENABLE_FLAG; + if (changes) bnx2x_reload = true; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 571427c7226b..adcacda7af7b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -932,8 +932,15 @@ static inline int bnx2x_func_start(struct bnx2x *bp) else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - start_params->gre_tunnel_mode = L2GRE_TUNNEL; - start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS; + start_params->tunnel_mode = TUNN_MODE_GRE; + start_params->gre_tunnel_type = IPGRE_TUNNEL; + start_params->inner_gre_rss_en = 1; + + if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) { + start_params->class_fail_ethtype = ETH_P_FIP; + start_params->class_fail = 1; + start_params->no_added_tags = 1; + } return bnx2x_func_state_change(bp, &func_params); } @@ -1297,15 +1304,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set) } } -static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) -{ - if (is_valid_ether_addr(addr) || - (is_zero_ether_addr(addr) && - (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))) - return true; - return false; -} /** * bnx2x_fill_fw_str - Fill buffer with FW version string diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index fb26bc4c42a1..6e4294ed1fc9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -2092,7 +2092,6 @@ static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); - int rc = 0; DP(BNX2X_MSG_DCB, "SET-ALL\n"); @@ -2110,9 +2109,7 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) 1); bnx2x_dcbx_init(bp, true); } - DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); - if (rc) - return 1; + DP(BNX2X_MSG_DCB, "set_dcbx_params done\n"); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index 12eb4baee9f6..741aa130c19f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -40,7 +40,7 @@ struct dump_header { u32 dump_meta_data; /* OR of CHIP and PATH. 
*/ }; -#define BNX2X_DUMP_VERSION 0x50acff01 +#define BNX2X_DUMP_VERSION 0x61111111 struct reg_addr { u32 addr; u32 size; @@ -1464,7 +1464,6 @@ static const struct reg_addr reg_addrs[] = { { 0x180398, 1, 0x1c, 0x924}, { 0x1803a0, 5, 0x1c, 0x924}, { 0x1803b4, 2, 0x18, 0x924}, - { 0x180400, 256, 0x3, 0xfff}, { 0x181000, 4, 0x1f, 0x93c}, { 0x181010, 1020, 0x1f, 0x38}, { 0x182000, 4, 0x18, 0x924}, @@ -1576,7 +1575,6 @@ static const struct reg_addr reg_addrs[] = { { 0x200398, 1, 0x1c, 0x924}, { 0x2003a0, 1, 0x1c, 0x924}, { 0x2003a8, 2, 0x1c, 0x924}, - { 0x200400, 256, 0x3, 0xfff}, { 0x202000, 4, 0x1f, 0x1927}, { 0x202010, 2044, 0x1f, 0x1007}, { 0x204000, 4, 0x18, 0x924}, @@ -1688,7 +1686,6 @@ static const struct reg_addr reg_addrs[] = { { 0x280398, 1, 0x1c, 0x924}, { 0x2803a0, 1, 0x1c, 0x924}, { 0x2803a8, 2, 0x1c, 0x924}, - { 0x280400, 256, 0x3, 0xfff}, { 0x282000, 4, 0x1f, 0x9e4}, { 0x282010, 2044, 0x1f, 0x1c0}, { 0x284000, 4, 0x18, 0x924}, @@ -1800,7 +1797,6 @@ static const struct reg_addr reg_addrs[] = { { 0x300398, 1, 0x1c, 0x924}, { 0x3003a0, 1, 0x1c, 0x924}, { 0x3003a8, 2, 0x1c, 0x924}, - { 0x300400, 256, 0x3, 0xfff}, { 0x302000, 4, 0x1f, 0xf24}, { 0x302010, 2044, 0x1f, 0xe00}, { 0x304000, 4, 0x18, 0x924}, @@ -2206,10 +2202,10 @@ static const struct wreg_addr wreg_addr_e3b0 = { 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff}; static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = { - {20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812, - 26247, 35655, 19074}, - {32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804, - 26977, 40957, 35895}, + {19758, 17543, 26951, 18705, 17287, 26695, 19812, 31367, 40775, 19788, + 25223, 34631, 19074}, + {31750, 18273, 32253, 30697, 18017, 31997, 31804, 32097, 46077, 31780, + 25953, 39933, 35895}, {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557, 25608, 41377, 43903}, {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 92fee842f954..1edc931b1458 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1852,7 +1852,7 @@ static int bnx2x_set_ringparam(struct net_device *dev, if ((ering->rx_pending > MAX_RX_AVAIL) || (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) || - (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) || + (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 
0 : MAX_TX_AVAIL)) || (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EINVAL; @@ -3481,6 +3481,46 @@ static int bnx2x_set_channels(struct net_device *dev, return bnx2x_nic_load(bp, LOAD_NORMAL); } +static int bnx2x_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct bnx2x *bp = netdev_priv(dev); + + if (bp->flags & PTP_SUPPORTED) { + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (bp->ptp_clock) + info->phc_index = ptp_clock_index(bp->ptp_clock); + else + info->phc_index = -1; + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ); + + info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON); + + return 0; + } + + return ethtool_op_get_ts_info(dev, info); +} + static const struct ethtool_ops bnx2x_ethtool_ops = { .get_settings = bnx2x_get_settings, .set_settings = bnx2x_set_settings, @@ -3522,7 +3562,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_module_eeprom = bnx2x_get_module_eeprom, .get_eee = bnx2x_get_eee, .set_eee = bnx2x_set_eee, - .get_ts_info = ethtool_op_get_ts_info, + .get_ts_info = bnx2x_get_ts_info, }; static const struct ethtool_ops bnx2x_vf_ethtool_ops = { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 95dc36543548..7636e3c18771 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -10,170 +10,170 @@ #ifndef BNX2X_FW_DEFS_H #define BNX2X_FW_DEFS_H -#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[148].base) +#define CSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[152].base) #define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[147].base + ((assertListEntry) * IRO[147].m1)) + (IRO[151].base + ((assertListEntry) * IRO[151].m1)) #define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \ - (IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \ - IRO[153].m2)) + (IRO[157].base + (((pfId)>>1) * IRO[157].m1) + (((pfId)&1) * \ + IRO[157].m2)) #define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \ - (IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \ - IRO[154].m2)) + (IRO[158].base + (((pfId)>>1) * IRO[158].m1) + (((pfId)&1) * \ + IRO[158].m2)) #define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \ - (IRO[159].base + ((funcId) * IRO[159].m1)) + (IRO[163].base + ((funcId) * IRO[163].m1)) #define CSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[149].base + ((funcId) * IRO[149].m1)) + (IRO[153].base + ((funcId) * IRO[153].m1)) #define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \ - (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2)) + (IRO[143].base + ((hcIndex) * IRO[143].m1) + ((sbId) * IRO[143].m2)) #define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \ - (IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \ - * 
IRO[138].m2) + ((sbId) * IRO[138].m3)) -#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) + (IRO[142].base + (((hcIndex)>>2) * IRO[142].m1) + (((hcIndex)&3) \ + * IRO[142].m2) + ((sbId) * IRO[142].m3)) +#define CSTORM_IGU_MODE_OFFSET (IRO[161].base) #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[317].base + ((pfId) * IRO[317].m1)) + (IRO[323].base + ((pfId) * IRO[323].m1)) #define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[318].base + ((pfId) * IRO[318].m1)) + (IRO[324].base + ((pfId) * IRO[324].m1)) #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \ - (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2)) + (IRO[316].base + ((pfId) * IRO[316].m1) + ((iscsiEqId) * IRO[316].m2)) #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2)) + (IRO[318].base + ((pfId) * IRO[318].m1) + ((iscsiEqId) * IRO[318].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \ - (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2)) + (IRO[317].base + ((pfId) * IRO[317].m1) + ((iscsiEqId) * IRO[317].m2)) #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \ - (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2)) + (IRO[319].base + ((pfId) * IRO[319].m1) + ((iscsiEqId) * IRO[319].m2)) #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \ - (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2)) -#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2)) +#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \ + (IRO[321].base + ((pfId) * IRO[321].m1) + ((iscsiEqId) * IRO[321].m2)) #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \ - (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2)) + (IRO[320].base + ((pfId) * IRO[320].m1) + ((iscsiEqId) * IRO[320].m2)) #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[316].base + ((pfId) * IRO[316].m1)) + (IRO[322].base + ((pfId) * IRO[322].m1)) #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[308].base + ((pfId) * IRO[308].m1)) + (IRO[314].base + ((pfId) * IRO[314].m1)) #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[307].base + ((pfId) * IRO[307].m1)) + (IRO[313].base + ((pfId) * IRO[313].m1)) #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[306].base + ((pfId) * IRO[306].m1)) + (IRO[312].base + ((pfId) * IRO[312].m1)) #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[151].base + ((funcId) * IRO[151].m1)) + (IRO[155].base + ((funcId) * IRO[155].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \ - (IRO[142].base + ((pfId) * IRO[142].m1)) + (IRO[146].base + ((pfId) * IRO[146].m1)) #define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \ - (IRO[143].base + ((pfId) * IRO[143].m1)) + (IRO[147].base + ((pfId) * IRO[147].m1)) #define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \ - (IRO[141].base + ((pfId) * IRO[141].m1)) -#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size) + (IRO[145].base + ((pfId) * IRO[145].m1)) +#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[145].size) #define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \ - (IRO[144].base + ((pfId) * IRO[144].m1)) -#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size) + (IRO[148].base + ((pfId) * IRO[148].m1)) +#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[148].size) #define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \ - (IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2)) + (IRO[140].base + ((sbId) * IRO[140].m1) + 
((hcIndex) * IRO[140].m2)) #define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \ - (IRO[133].base + ((sbId) * IRO[133].m1)) + (IRO[137].base + ((sbId) * IRO[137].m1)) #define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \ - (IRO[134].base + ((sbId) * IRO[134].m1)) + (IRO[138].base + ((sbId) * IRO[138].m1)) #define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \ - (IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2)) + (IRO[139].base + ((sbId) * IRO[139].m1) + ((hcIndex) * IRO[139].m2)) #define CSTORM_STATUS_BLOCK_OFFSET(sbId) \ - (IRO[132].base + ((sbId) * IRO[132].m1)) -#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size) + (IRO[136].base + ((sbId) * IRO[136].m1)) +#define CSTORM_STATUS_BLOCK_SIZE (IRO[136].size) #define CSTORM_SYNC_BLOCK_OFFSET(sbId) \ - (IRO[137].base + ((sbId) * IRO[137].m1)) -#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size) + (IRO[141].base + ((sbId) * IRO[141].m1)) +#define CSTORM_SYNC_BLOCK_SIZE (IRO[141].size) #define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \ - (IRO[155].base + ((vfId) * IRO[155].m1)) + (IRO[159].base + ((vfId) * IRO[159].m1)) #define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \ - (IRO[156].base + ((vfId) * IRO[156].m1)) + (IRO[160].base + ((vfId) * IRO[160].m1)) #define CSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[150].base + ((funcId) * IRO[150].m1)) + (IRO[154].base + ((funcId) * IRO[154].m1)) #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \ - (IRO[203].base + ((pfId) * IRO[203].m1)) + (IRO[207].base + ((pfId) * IRO[207].m1)) #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base) #define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \ (IRO[101].base + ((assertListEntry) * IRO[101].m1)) #define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \ - (IRO[201].base + ((pfId) * IRO[201].m1)) + (IRO[205].base + ((pfId) * IRO[205].m1)) #define TSTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[103].base + ((funcId) * IRO[103].m1)) + (IRO[107].base + ((funcId) * IRO[107].m1)) #define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[272].base + ((pfId) * IRO[272].m1)) + (IRO[278].base + ((pfId) * IRO[278].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \ - (IRO[273].base + ((pfId) * IRO[273].m1)) + (IRO[279].base + ((pfId) * IRO[279].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \ - (IRO[274].base + ((pfId) * IRO[274].m1)) + (IRO[280].base + ((pfId) * IRO[280].m1)) #define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \ - (IRO[275].base + ((pfId) * IRO[275].m1)) + (IRO[281].base + ((pfId) * IRO[281].m1)) #define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[271].base + ((pfId) * IRO[271].m1)) + (IRO[277].base + ((pfId) * IRO[277].m1)) #define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[270].base + ((pfId) * IRO[270].m1)) + (IRO[276].base + ((pfId) * IRO[276].m1)) #define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[269].base + ((pfId) * IRO[269].m1)) + (IRO[275].base + ((pfId) * IRO[275].m1)) #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[268].base + ((pfId) * IRO[268].m1)) + (IRO[274].base + ((pfId) * IRO[274].m1)) #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \ - (IRO[278].base + ((pfId) * IRO[278].m1)) + (IRO[284].base + ((pfId) * IRO[284].m1)) #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[264].base + ((pfId) * IRO[264].m1)) + (IRO[270].base + ((pfId) * IRO[270].m1)) #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[265].base + ((pfId) * IRO[265].m1)) + (IRO[271].base + ((pfId) * IRO[271].m1)) #define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \ - 
(IRO[266].base + ((pfId) * IRO[266].m1)) + (IRO[272].base + ((pfId) * IRO[272].m1)) #define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \ - (IRO[267].base + ((pfId) * IRO[267].m1)) + (IRO[273].base + ((pfId) * IRO[273].m1)) #define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \ - (IRO[202].base + ((pfId) * IRO[202].m1)) + (IRO[206].base + ((pfId) * IRO[206].m1)) #define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[105].base + ((funcId) * IRO[105].m1)) + (IRO[109].base + ((funcId) * IRO[109].m1)) #define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \ - (IRO[217].base + ((pfId) * IRO[217].m1)) + (IRO[223].base + ((pfId) * IRO[223].m1)) #define TSTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[104].base + ((funcId) * IRO[104].m1)) -#define USTORM_AGG_DATA_OFFSET (IRO[206].base) -#define USTORM_AGG_DATA_SIZE (IRO[206].size) -#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base) + (IRO[108].base + ((funcId) * IRO[108].m1)) +#define USTORM_AGG_DATA_OFFSET (IRO[212].base) +#define USTORM_AGG_DATA_SIZE (IRO[212].size) +#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[181].base) #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \ - (IRO[176].base + ((assertListEntry) * IRO[176].m1)) + (IRO[180].base + ((assertListEntry) * IRO[180].m1)) #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \ - (IRO[183].base + ((portId) * IRO[183].m1)) + (IRO[187].base + ((portId) * IRO[187].m1)) #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \ - (IRO[319].base + ((pfId) * IRO[319].m1)) + (IRO[325].base + ((pfId) * IRO[325].m1)) #define USTORM_FUNC_EN_OFFSET(funcId) \ - (IRO[178].base + ((funcId) * IRO[178].m1)) + (IRO[182].base + ((funcId) * IRO[182].m1)) #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ - (IRO[283].base + ((pfId) * IRO[283].m1)) + (IRO[289].base + ((pfId) * IRO[289].m1)) #define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \ - (IRO[284].base + ((pfId) * IRO[284].m1)) + (IRO[290].base + ((pfId) * IRO[290].m1)) #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \ - (IRO[288].base + ((pfId) * IRO[288].m1)) + (IRO[294].base + ((pfId) * IRO[294].m1)) #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \ - (IRO[285].base + ((pfId) * IRO[285].m1)) + (IRO[291].base + ((pfId) * IRO[291].m1)) #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[281].base + ((pfId) * IRO[281].m1)) + (IRO[287].base + ((pfId) * IRO[287].m1)) #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[280].base + ((pfId) * IRO[280].m1)) + (IRO[286].base + ((pfId) * IRO[286].m1)) #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[279].base + ((pfId) * IRO[279].m1)) + (IRO[285].base + ((pfId) * IRO[285].m1)) #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[282].base + ((pfId) * IRO[282].m1)) + (IRO[288].base + ((pfId) * IRO[288].m1)) #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \ - (IRO[286].base + ((pfId) * IRO[286].m1)) + (IRO[292].base + ((pfId) * IRO[292].m1)) #define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \ - (IRO[287].base + ((pfId) * IRO[287].m1)) + (IRO[293].base + ((pfId) * IRO[293].m1)) #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \ - (IRO[182].base + ((pfId) * IRO[182].m1)) + (IRO[186].base + ((pfId) * IRO[186].m1)) #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ - (IRO[180].base + ((funcId) * IRO[180].m1)) + (IRO[184].base + ((funcId) * IRO[184].m1)) #define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \ - (IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \ - IRO[209].m2)) + (IRO[215].base + ((portId) * IRO[215].m1) + ((clientId) * \ + IRO[215].m2)) #define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \ - (IRO[210].base + ((qzoneId) * IRO[210].m1)) 
-#define USTORM_TPA_BTR_OFFSET (IRO[207].base) -#define USTORM_TPA_BTR_SIZE (IRO[207].size) + (IRO[216].base + ((qzoneId) * IRO[216].m1)) +#define USTORM_TPA_BTR_OFFSET (IRO[213].base) +#define USTORM_TPA_BTR_SIZE (IRO[213].size) #define USTORM_VF_TO_PF_OFFSET(funcId) \ - (IRO[179].base + ((funcId) * IRO[179].m1)) + (IRO[183].base + ((funcId) * IRO[183].m1)) #define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base) #define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base) #define XSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[51].base) @@ -186,39 +186,39 @@ #define XSTORM_FUNC_EN_OFFSET(funcId) \ (IRO[47].base + ((funcId) * IRO[47].m1)) #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \ - (IRO[296].base + ((pfId) * IRO[296].m1)) + (IRO[302].base + ((pfId) * IRO[302].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \ - (IRO[299].base + ((pfId) * IRO[299].m1)) + (IRO[305].base + ((pfId) * IRO[305].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \ - (IRO[300].base + ((pfId) * IRO[300].m1)) + (IRO[306].base + ((pfId) * IRO[306].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \ - (IRO[301].base + ((pfId) * IRO[301].m1)) + (IRO[307].base + ((pfId) * IRO[307].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \ - (IRO[302].base + ((pfId) * IRO[302].m1)) + (IRO[308].base + ((pfId) * IRO[308].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \ - (IRO[303].base + ((pfId) * IRO[303].m1)) + (IRO[309].base + ((pfId) * IRO[309].m1)) #define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \ - (IRO[304].base + ((pfId) * IRO[304].m1)) + (IRO[310].base + ((pfId) * IRO[310].m1)) #define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \ - (IRO[305].base + ((pfId) * IRO[305].m1)) + (IRO[311].base + ((pfId) * IRO[311].m1)) #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \ - (IRO[295].base + ((pfId) * IRO[295].m1)) + (IRO[301].base + ((pfId) * IRO[301].m1)) #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \ - (IRO[294].base + ((pfId) * IRO[294].m1)) + (IRO[300].base + ((pfId) * IRO[300].m1)) #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \ - (IRO[293].base + ((pfId) * IRO[293].m1)) + (IRO[299].base + ((pfId) * IRO[299].m1)) #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \ - (IRO[298].base + ((pfId) * IRO[298].m1)) + (IRO[304].base + ((pfId) * IRO[304].m1)) #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \ - (IRO[297].base + ((pfId) * IRO[297].m1)) + (IRO[303].base + ((pfId) * IRO[303].m1)) #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \ - (IRO[292].base + ((pfId) * IRO[292].m1)) + (IRO[298].base + ((pfId) * IRO[298].m1)) #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \ - (IRO[291].base + ((pfId) * IRO[291].m1)) + (IRO[297].base + ((pfId) * IRO[297].m1)) #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \ - (IRO[290].base + ((pfId) * IRO[290].m1)) + (IRO[296].base + ((pfId) * IRO[296].m1)) #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \ - (IRO[289].base + ((pfId) * IRO[289].m1)) + (IRO[295].base + ((pfId) * IRO[295].m1)) #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \ (IRO[44].base + ((pfId) * IRO[44].m1)) #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \ @@ -231,16 +231,19 @@ #define XSTORM_SPQ_PROD_OFFSET(funcId) \ (IRO[31].base + ((funcId) * IRO[31].m1)) #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \ - (IRO[211].base + ((portId) * IRO[211].m1)) + (IRO[217].base + ((portId) * IRO[217].m1)) #define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \ - (IRO[212].base + ((portId) * IRO[212].m1)) + (IRO[218].base + ((portId) * IRO[218].m1)) #define 
XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \ - (IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \ - IRO[214].m2)) + (IRO[220].base + (((pfId)>>1) * IRO[220].m1) + (((pfId)&1) * \ + IRO[220].m2)) #define XSTORM_VF_TO_PF_OFFSET(funcId) \ (IRO[48].base + ((funcId) * IRO[48].m1)) #define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0 +/* eth hsi version */ +#define ETH_FP_HSI_VERSION (ETH_FP_HSI_VER_2) + /* Ethernet Ring parameters */ #define X_ETH_LOCAL_RING_SIZE 13 #define FIRST_BD_IN_PKT 0 @@ -356,6 +359,7 @@ #define XSEMI_CLK1_RESUL_CHIP (1e-3) #define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6)) +#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6)) /**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index c4daa068f1db..583591d52497 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -280,17 +280,11 @@ struct shared_hw_cfg { /* NVRAM Offset */ #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH 0x60000000 #define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED 0x80000000 - - u32 power_dissipated; /* 0x11c */ - #define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK 0x00ff0000 - #define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT 16 - #define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE 0x00000000 - #define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT 0x00010000 - #define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT 0x00020000 - #define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT 0x00030000 - - #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 - #define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT 24 + u32 config_3; /* 0x11C */ + #define SHARED_HW_CFG_EXTENDED_MF_MODE_MASK 0x00000F00 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_SHIFT 8 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5 0x00000000 + #define SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR2_DOT_0 0x00000100 u32 ump_nc_si_config; /* 0x120 */ #define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK 0x00000003 @@ -859,6 +853,8 @@ struct shared_feat_cfg { /* NVRAM Offset */ #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE 0x00000600 + #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE 0x00000700 /* The interval in seconds between sending LLDP packets. 
Set to zero to disable the feature */ @@ -1268,6 +1264,10 @@ struct drv_func_mb { #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 + #define DRV_MSG_CODE_OEM_OK 0x00010000 + #define DRV_MSG_CODE_OEM_FAILURE 0x00020000 + #define DRV_MSG_CODE_OEM_UPDATE_SVID_OK 0x00030000 + #define DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE 0x00040000 /* * The optic module verification command requires bootcode * v5.0.6 or later, the specific optic module verification command @@ -1423,6 +1423,12 @@ struct drv_func_mb { #define DRV_STATUS_SET_MF_BW 0x00000004 #define DRV_STATUS_LINK_EVENT 0x00000008 + #define DRV_STATUS_OEM_EVENT_MASK 0x00000070 + #define DRV_STATUS_OEM_DISABLE_ENABLE_PF 0x00000010 + #define DRV_STATUS_OEM_BANDWIDTH_ALLOCATION 0x00000020 + + #define DRV_STATUS_OEM_UPDATE_SVID 0x00000080 + #define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 #define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 #define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION 0x00000200 @@ -2881,8 +2887,8 @@ struct afex_stats { }; #define BCM_5710_FW_MAJOR_VERSION 7 -#define BCM_5710_FW_MINOR_VERSION 8 -#define BCM_5710_FW_REVISION_VERSION 19 +#define BCM_5710_FW_MINOR_VERSION 10 +#define BCM_5710_FW_REVISION_VERSION 51 #define BCM_5710_FW_ENGINEERING_VERSION 0 #define BCM_5710_FW_COMPILE_FLAGS 1 @@ -3451,6 +3457,7 @@ enum classify_rule { CLASSIFY_RULE_OPCODE_MAC, CLASSIFY_RULE_OPCODE_VLAN, CLASSIFY_RULE_OPCODE_PAIR, + CLASSIFY_RULE_OPCODE_VXLAN, MAX_CLASSIFY_RULE }; @@ -3480,7 +3487,8 @@ struct client_init_general_data { u8 func_id; u8 cos; u8 traffic_type; - u32 reserved0; + u8 fp_hsi_ver; + u8 reserved0[3]; }; @@ -3550,7 +3558,9 @@ struct client_init_rx_data { __le16 rx_cos_mask; __le16 silent_vlan_value; __le16 silent_vlan_mask; - __le32 reserved6[2]; + u8 handle_ptp_pkts_flg; + u8 reserved6[3]; + __le32 reserved7; }; /* @@ -3581,7 +3591,7 @@ struct client_init_tx_data { u8 tunnel_lso_inc_ip_id; u8 refuse_outband_vlan_flg; u8 tunnel_non_lso_pcsum_location; - u8 reserved1; + u8 tunnel_non_lso_outer_ip_csum_location; }; /* @@ -3619,7 +3629,9 @@ struct client_update_ramrod_data { u8 refuse_outband_vlan_change_flg; u8 tx_switching_flg; u8 tx_switching_change_flg; - __le32 reserved1; + u8 handle_ptp_pkts_flg; + u8 handle_ptp_pkts_change_flg; + __le16 reserved1; __le32 echo; }; @@ -3639,6 +3651,11 @@ struct double_regpair { u32 regpair1_hi; }; +/* 2nd parse bd type used in ethernet tx BDs */ +enum eth_2nd_parse_bd_type { + ETH_2ND_PARSE_BD_TYPE_LSO_TUNNEL, + MAX_ETH_2ND_PARSE_BD_TYPE +}; /* * Ethernet address types used in ethernet tx BDs */ @@ -3724,12 +3741,25 @@ struct eth_classify_vlan_cmd { }; /* + * Command for adding/removing a VXLAN classification rule + */ +struct eth_classify_vxlan_cmd { + struct eth_classify_cmd_header header; + __le32 vni; + __le16 inner_mac_lsb; + __le16 inner_mac_mid; + __le16 inner_mac_msb; + __le16 reserved1; +}; + +/* * union for eth classification rule */ union eth_classify_rule_cmd { struct eth_classify_mac_cmd mac; struct eth_classify_vlan_cmd vlan; struct eth_classify_pair_cmd pair; + struct eth_classify_vxlan_cmd vxlan; }; /* @@ -3835,8 +3865,10 @@ struct eth_fast_path_rx_cqe { #define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4 #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5) #define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5 -#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6) -#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6 +#define ETH_FAST_PATH_RX_CQE_PTP_PKT (0x1<<6) +#define ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x1<<7) +#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 7 u8 status_flags; #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0) #define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0 @@ -3907,6 +3939,13 @@ struct eth_filter_rules_ramrod_data { struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT]; }; +/* Hsi version */ +enum eth_fp_hsi_ver { + ETH_FP_HSI_VER_0, + ETH_FP_HSI_VER_1, + ETH_FP_HSI_VER_2, + MAX_ETH_FP_HSI_VER +}; /* * parameters for eth classification configuration ramrod @@ -3955,29 +3994,17 @@ struct eth_mac_addresses { /* tunneling related data */ struct eth_tunnel_data { -#if defined(__BIG_ENDIAN) - __le16 dst_mid; - __le16 dst_lo; -#elif defined(__LITTLE_ENDIAN) __le16 dst_lo; __le16 dst_mid; -#endif -#if defined(__BIG_ENDIAN) - __le16 reserved0; - __le16 dst_hi; -#elif defined(__LITTLE_ENDIAN) __le16 dst_hi; - __le16 reserved0; -#endif -#if defined(__BIG_ENDIAN) - u8 reserved1; - u8 ip_hdr_start_inner_w; - __le16 pseudo_csum; -#elif defined(__LITTLE_ENDIAN) + __le16 fw_ip_hdr_csum; __le16 pseudo_csum; u8 ip_hdr_start_inner_w; - u8 reserved1; -#endif + u8 flags; +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0) +#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0 +#define ETH_TUNNEL_DATA_RESERVED (0x7F<<1) +#define ETH_TUNNEL_DATA_RESERVED_SHIFT 1 }; /* union for mac addresses and for tunneling data. @@ -4064,31 +4091,41 @@ enum eth_rss_mode { */ struct eth_rss_update_ramrod_data { u8 rss_engine_id; - u8 capabilities; + u8 rss_mode; + __le16 capabilities; #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2) #define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4 -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5) -#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5 -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6) -#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6 -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7) -#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY (0x1<<3) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY_SHIFT 3 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<4) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 4 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<5) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 5 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<6) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6 +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7) +#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7 +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8) +#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8 +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9) +#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9 +#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10) +#define 
ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10 +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11) +#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11 +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12) +#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12 u8 rss_result_mask; - u8 rss_mode; - __le16 udp_4tuple_dst_port_mask; - __le16 udp_4tuple_dst_port_value; + u8 reserved3; + __le16 reserved4; u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE]; __le32 rss_key[T_ETH_RSS_KEY]; __le32 echo; - __le32 reserved3; + __le32 reserved5; }; @@ -4260,10 +4297,10 @@ enum eth_tunnel_lso_inc_ip_id { /* In case a tunnel exists and L4 checksum offload is used, * the pseudo checksum location, on packet or on BD. */ -enum eth_tunnel_non_lso_pcsum_location { - PCSUM_ON_PKT, - PCSUM_ON_BD, - MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION +enum eth_tunnel_non_lso_csum_location { + CSUM_ON_PKT, + CSUM_ON_BD, + MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION }; /* @@ -4310,8 +4347,10 @@ struct eth_tx_start_bd { __le16 vlan_or_ethertype; struct eth_tx_bd_flags bd_flags; u8 general_data; -#define ETH_TX_START_BD_HDR_NBDS (0xF<<0) +#define ETH_TX_START_BD_HDR_NBDS (0x7<<0) #define ETH_TX_START_BD_HDR_NBDS_SHIFT 0 +#define ETH_TX_START_BD_NO_ADDED_TAGS (0x1<<3) +#define ETH_TX_START_BD_NO_ADDED_TAGS_SHIFT 3 #define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4) #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4 #define ETH_TX_START_BD_PARSE_NBDS (0x3<<5) @@ -4387,8 +4426,8 @@ struct eth_tx_parse_2nd_bd { __le16 global_data; #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0) #define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0 -#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4) -#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4 +#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x1<<4) +#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 4 #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5) #define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5 #define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6) @@ -4397,9 +4436,14 @@ #define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7 #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8) #define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8 -#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13) -#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13 - __le16 reserved1; +#define ETH_TX_PARSE_2ND_BD_RESERVED1 (0x7<<13) +#define ETH_TX_PARSE_2ND_BD_RESERVED1_SHIFT 13 + u8 bd_type; +#define ETH_TX_PARSE_2ND_BD_TYPE (0xF<<0) +#define ETH_TX_PARSE_2ND_BD_TYPE_SHIFT 0 +#define ETH_TX_PARSE_2ND_BD_RESERVED2 (0xF<<4) +#define ETH_TX_PARSE_2ND_BD_RESERVED2_SHIFT 4 + u8 reserved3; u8 tcp_flags; #define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0) #define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0 @@ -4417,7 +4461,7 @@ struct eth_tx_parse_2nd_bd { #define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6 #define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7) #define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7 - u8 reserved2; + u8 reserved4; u8 tunnel_udp_hdr_start_w; u8 fw_ip_hdr_to_payload_w; __le16 fw_ip_csum_wo_len_flags_frag; @@ -5205,10 +5249,18 @@ struct function_start_data { u8 path_id; u8 network_cos_mode; u8 dmae_cmd_id; - u8 gre_tunnel_mode; - u8 gre_tunnel_rss; - u8 nvgre_clss_en; - __le16 reserved1[2]; + u8 tunnel_mode; + u8 gre_tunnel_type; + u8 tunn_clss_en; + u8 inner_gre_rss_en; + u8 sd_accept_mf_clss_fail; + __le16 vxlan_dst_port; + __le16 sd_accept_mf_clss_fail_ethtype; + __le16 sd_vlan_eth_type; + u8 sd_vlan_force_pri_flg; + u8 sd_vlan_force_pri_val; + u8 sd_accept_mf_clss_fail_match_ethtype; + u8 no_added_tags; }; struct
function_update_data { @@ -5225,12 +5277,20 @@ struct function_update_data { u8 tx_switch_suspend_change_flg; u8 tx_switch_suspend; u8 echo; + u8 update_tunn_cfg_flg; + u8 tunnel_mode; + u8 gre_tunnel_type; + u8 tunn_clss_en; + u8 inner_gre_rss_en; + __le16 vxlan_dst_port; + u8 sd_vlan_force_pri_change_flg; + u8 sd_vlan_force_pri_flg; + u8 sd_vlan_force_pri_val; + u8 sd_vlan_tag_change_flg; + u8 sd_vlan_eth_type_change_flg; u8 reserved1; - u8 update_gre_cfg_flg; - u8 gre_tunnel_mode; - u8 gre_tunnel_rss; - u8 nvgre_clss_en; - u32 reserved3; + __le16 sd_vlan_tag; + __le16 sd_vlan_eth_type; }; /* @@ -5259,17 +5319,9 @@ struct fw_version { #define __FW_VERSION_RESERVED_SHIFT 4 }; -/* GRE RSS Mode */ -enum gre_rss_mode { - GRE_OUTER_HEADERS_RSS, - GRE_INNER_HEADERS_RSS, - NVGRE_KEY_ENTROPY_RSS, - MAX_GRE_RSS_MODE -}; /* GRE Tunnel Mode */ enum gre_tunnel_type { - NO_GRE_TUNNEL, NVGRE_TUNNEL, L2GRE_TUNNEL, IPGRE_TUNNEL, @@ -5442,6 +5494,7 @@ enum ip_ver { * Malicious VF error ID */ enum malicious_vf_error_id { + MALICIOUS_VF_NO_ERROR, VF_PF_CHANNEL_NOT_READY, ETH_ILLEGAL_BD_LENGTHS, ETH_PACKET_TOO_SHORT, @@ -5602,6 +5655,16 @@ struct protocol_common_spe { union protocol_common_specific_data data; }; +/* The data for the Set Timesync Ramrod */ +struct set_timesync_ramrod_data { + u8 drift_adjust_cmd; + u8 offset_cmd; + u8 add_sub_drift_adjust_value; + u8 drift_adjust_value; + u32 drift_adjust_period; + struct regpair offset_delta; +}; + /* * The send queue element */ @@ -5724,10 +5787,38 @@ struct tstorm_vf_zone_data { struct regpair reserved; }; +/* Add or Subtract Value for Set Timesync Ramrod */ +enum ts_add_sub_value { + TS_SUB_VALUE, + TS_ADD_VALUE, + MAX_TS_ADD_SUB_VALUE +}; -/* - * zone A per-queue data - */ +/* Drift-Adjust Commands for Set Timesync Ramrod */ +enum ts_drift_adjust_cmd { + TS_DRIFT_ADJUST_KEEP, + TS_DRIFT_ADJUST_SET, + TS_DRIFT_ADJUST_RESET, + MAX_TS_DRIFT_ADJUST_CMD +}; + +/* Offset Commands for Set Timesync Ramrod */ +enum ts_offset_cmd { + TS_OFFSET_KEEP, + TS_OFFSET_INC, + TS_OFFSET_DEC, + MAX_TS_OFFSET_CMD +}; + +/* Tunnel Mode */ +enum tunnel_mode { + TUNN_MODE_NONE, + TUNN_MODE_VXLAN, + TUNN_MODE_GRE, + MAX_TUNNEL_MODE +}; + + /* zone A per-queue data */ struct ustorm_queue_zone_data { struct ustorm_eth_rx_producers eth_rx_producers; struct regpair reserved[3]; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index d1c093dcb054..74fbf9ea7bd8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -41,6 +41,7 @@ #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/if_vlan.h> +#include <linux/crash_dump.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/tcp.h> @@ -63,7 +64,6 @@ #include "bnx2x_vfpf.h" #include "bnx2x_dcb.h" #include "bnx2x_sp.h" - #include <linux/firmware.h> #include "bnx2x_fw_file_hdr.h" /* FW files */ @@ -290,6 +290,8 @@ static int bnx2x_set_storm_rx_mode(struct bnx2x *bp); * General service functions ****************************************************************************/ +static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr); + static void __storm_memset_dma_mapping(struct bnx2x *bp, u32 addr, dma_addr_t mapping) { @@ -523,6 +525,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, * as long as this code is called both from syscall context and * from ndo_set_rx_mode() flow that may be called from BH. 
*/ + spin_lock_bh(&bp->dmae_lock); /* reset completion */ @@ -551,7 +554,9 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae, } unlock: + spin_unlock_bh(&bp->dmae_lock); + return rc; } @@ -646,119 +651,98 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); } +enum storms { + XSTORM, + TSTORM, + CSTORM, + USTORM, + MAX_STORMS +}; + +#define STORMS_NUM 4 +#define REGS_IN_ENTRY 4 + +static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp, + enum storms storm, + int entry) +{ + switch (storm) { + case XSTORM: + return XSTORM_ASSERT_LIST_OFFSET(entry); + case TSTORM: + return TSTORM_ASSERT_LIST_OFFSET(entry); + case CSTORM: + return CSTORM_ASSERT_LIST_OFFSET(entry); + case USTORM: + return USTORM_ASSERT_LIST_OFFSET(entry); + case MAX_STORMS: + default: + BNX2X_ERR("unknown storm\n"); + } + return -EINVAL; +} + static int bnx2x_mc_assert(struct bnx2x *bp) { char last_idx; - int i, rc = 0; - u32 row0, row1, row2, row3; - - /* XSTORM */ - last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_XSTRORM_INTMEM + - XSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } - - /* TSTORM */ - last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_TSTRORM_INTMEM + - TSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } + int i, j, rc = 0; + enum storms storm; + u32 regs[REGS_IN_ENTRY]; + u32 bar_storm_intmem[STORMS_NUM] = { + BAR_XSTRORM_INTMEM, + BAR_TSTRORM_INTMEM, + BAR_CSTRORM_INTMEM, + BAR_USTRORM_INTMEM + }; + u32 storm_assert_list_index[STORMS_NUM] = { + XSTORM_ASSERT_LIST_INDEX_OFFSET, + TSTORM_ASSERT_LIST_INDEX_OFFSET, + CSTORM_ASSERT_LIST_INDEX_OFFSET, + USTORM_ASSERT_LIST_INDEX_OFFSET + }; + char *storms_string[STORMS_NUM] = { + "XSTORM", + "TSTORM", + "CSTORM", + "USTORM" + }; - /* CSTORM */ - last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_CSTRORM_INTMEM + - CSTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_CSTRORM_INTMEM 
+ - CSTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; + for (storm = XSTORM; storm < MAX_STORMS; storm++) { + last_idx = REG_RD8(bp, bar_storm_intmem[storm] + + storm_assert_list_index[storm]); + if (last_idx) + BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n", + storms_string[storm], last_idx); + + /* print the asserts */ + for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { + /* read a single assert entry */ + for (j = 0; j < REGS_IN_ENTRY; j++) + regs[j] = REG_RD(bp, bar_storm_intmem[storm] + + bnx2x_get_assert_list_entry(bp, + storm, + i) + + sizeof(u32) * j); + + /* log entry if it contains a valid assert */ + if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) { + BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", + storms_string[storm], i, regs[3], + regs[2], regs[1], regs[0]); + rc++; + } else { + break; + } } } - /* USTORM */ - last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_INDEX_OFFSET); - if (last_idx) - BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); - - /* print the asserts */ - for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) { - - row0 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i)); - row1 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 4); - row2 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 8); - row3 = REG_RD(bp, BAR_USTRORM_INTMEM + - USTORM_ASSERT_LIST_OFFSET(i) + 12); - - if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { - BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, row3, row2, row1, row0); - rc++; - } else { - break; - } - } + BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n", + CHIP_IS_E1(bp) ? "everest1" : + CHIP_IS_E1H(bp) ? "everest1h" : + CHIP_IS_E2(bp) ? 
"everest2" : "everest3", + BCM_5710_FW_MAJOR_VERSION, + BCM_5710_FW_MINOR_VERSION, + BCM_5710_FW_REVISION_VERSION); return rc; } @@ -983,6 +967,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) u32 *sb_data_p; struct bnx2x_fp_txdata txdata; + if (!bp->fp) + break; + + if (!fp->rx_cons_sb) + continue; + /* Rx */ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n", i, fp->rx_bd_prod, fp->rx_bd_cons, @@ -995,7 +985,14 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) /* Tx */ for_each_cos_in_tx_queue(fp, cos) { + if (!fp->txdata_ptr[cos]) + break; + txdata = *fp->txdata_ptr[cos]; + + if (!txdata.tx_cons_sb) + continue; + BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", i, txdata.tx_pkt_prod, txdata.tx_pkt_cons, txdata.tx_bd_prod, @@ -1097,6 +1094,12 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) for_each_valid_rx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + if (!bp->fp) + break; + + if (!fp->rx_cons_sb) + continue; + start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503); for (j = start; j != end; j = RX_BD(j + 1)) { @@ -1130,9 +1133,19 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) /* Tx */ for_each_valid_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; + + if (!bp->fp) + break; + for_each_cos_in_tx_queue(fp, cos) { struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + if (!fp->txdata_ptr[cos]) + break; + + if (!txdata->tx_cons_sb) + continue; + start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); for (j = start; j != end; j = TX_BD(j + 1)) { @@ -2071,8 +2084,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port) else value = 0; - DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value); - return value; } @@ -2894,6 +2905,57 @@ static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd) } } +static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp) +{ + struct bnx2x_func_switch_update_params *switch_update_params; + struct bnx2x_func_state_params func_params; + + memset(&func_params, 0, sizeof(struct bnx2x_func_state_params)); + switch_update_params = &func_params.params.switch_update; + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; + + if (IS_MF_UFP(bp)) { + int func = BP_ABS_FUNC(bp); + u32 val; + + /* Re-learn the S-tag from shmem */ + val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) & + FUNC_MF_CFG_E1HOV_TAG_MASK; + if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { + bp->mf_ov = val; + } else { + BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n"); + goto fail; + } + + /* Configure new S-tag in LLH */ + REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8, + bp->mf_ov); + + /* Send Ramrod to update FW of change */ + __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, + &switch_update_params->changes); + switch_update_params->vlan = bp->mf_ov; + + if (bnx2x_func_state_change(bp, &func_params) < 0) { + BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n", + bp->mf_ov); + goto fail; + } + + DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov); + + bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0); + + return; + } + + /* not supported by SW yet */ +fail: + bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0); +} + static void bnx2x_pmf_update(struct bnx2x *bp) { int port = BP_PORT(bp); @@ -3286,7 +3348,8 @@ static void 
bnx2x_e1h_enable(struct bnx2x *bp) { int port = BP_PORT(bp); - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); + if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); /* Tx queue should be only re-enabled */ netif_tx_wake_all_queues(bp->dev); @@ -3641,14 +3704,30 @@ out: ethver, iscsiver, fcoever); } -static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) +static void bnx2x_oem_event(struct bnx2x *bp, u32 event) { - DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); + u32 cmd_ok, cmd_fail; - if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { + /* sanity */ + if (event & DRV_STATUS_DCC_EVENT_MASK && + event & DRV_STATUS_OEM_EVENT_MASK) { + BNX2X_ERR("Received simultaneous events %08x\n", event); + return; + } - /* - * This is the only place besides the function initialization + if (event & DRV_STATUS_DCC_EVENT_MASK) { + cmd_fail = DRV_MSG_CODE_DCC_FAILURE; + cmd_ok = DRV_MSG_CODE_DCC_OK; + } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ { + cmd_fail = DRV_MSG_CODE_OEM_FAILURE; + cmd_ok = DRV_MSG_CODE_OEM_OK; + } + + DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event); + + if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF | + DRV_STATUS_OEM_DISABLE_ENABLE_PF)) { + /* This is the only place besides the function initialization * where the bp->flags can change so it is done without any * locks */ @@ -3663,18 +3742,22 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) bnx2x_e1h_enable(bp); } - dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; + event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF | + DRV_STATUS_OEM_DISABLE_ENABLE_PF); } - if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { + + if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | + DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) { bnx2x_config_mf_bw(bp); - dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; + event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION | + DRV_STATUS_OEM_BANDWIDTH_ALLOCATION); } /* Report results to MCP */ - if (dcc_event) - bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0); + if (event) + bnx2x_fw_command(bp, cmd_fail, 0); else - bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0); + bnx2x_fw_command(bp, cmd_ok, 0); } /* must be called under the spq lock */ @@ -4156,9 +4239,12 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) func_mf_config[BP_ABS_FUNC(bp)].config); val = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_status); - if (val & DRV_STATUS_DCC_EVENT_MASK) - bnx2x_dcc_event(bp, - (val & DRV_STATUS_DCC_EVENT_MASK)); + + if (val & (DRV_STATUS_DCC_EVENT_MASK | + DRV_STATUS_OEM_EVENT_MASK)) + bnx2x_oem_event(bp, + (val & (DRV_STATUS_DCC_EVENT_MASK | + DRV_STATUS_OEM_EVENT_MASK))); if (val & DRV_STATUS_SET_MF_BW) bnx2x_set_mf_bw(bp); @@ -4184,6 +4270,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) val & DRV_STATUS_AFEX_EVENT_MASK); if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) bnx2x_handle_eee_event(bp); + + if (val & DRV_STATUS_OEM_UPDATE_SVID) + bnx2x_handle_update_svid_cmd(bp); + if (bp->link_vars.periodic_flags & PERIODIC_FLAGS_LINK_EVENT) { /* sync with link */ @@ -4678,7 +4768,7 @@ static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig, for (i = 0; sig; i++) { cur_bit = (0x1UL << i); if (sig & cur_bit) { - res |= true; /* Each bit is real error! */ + res = true; /* Each bit is real error! 
*/ if (print) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: @@ -4757,21 +4847,21 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, _print_next_block((*par_num)++, "MCP ROM"); *global = true; - res |= true; + res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) _print_next_block((*par_num)++, "MCP UMP RX"); *global = true; - res |= true; + res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) _print_next_block((*par_num)++, "MCP UMP TX"); *global = true; - res |= true; + res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) @@ -4803,7 +4893,7 @@ static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig, for (i = 0; sig; i++) { cur_bit = (0x1UL << i); if (sig & cur_bit) { - res |= true; /* Each bit is real error! */ + res = true; /* Each bit is real error! */ if (print) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: @@ -5452,6 +5542,14 @@ static void bnx2x_eq_int(struct bnx2x *bp) break; goto next_spqe; + + case EVENT_RING_OPCODE_SET_TIMESYNC: + DP(BNX2X_MSG_SP | BNX2X_MSG_PTP, + "got set_timesync ramrod completion\n"); + if (f_obj->complete_cmd(bp, f_obj, + BNX2X_F_CMD_SET_TIMESYNC)) + break; + goto next_spqe; } switch (opcode | bp->state) { @@ -6102,7 +6200,7 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode, } /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ - if (bp->rx_mode != BNX2X_RX_MODE_NONE) { + if (rx_mode != BNX2X_RX_MODE_NONE) { __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags); __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags); } @@ -7662,7 +7760,11 @@ static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend) func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE; /* Function parameters */ - switch_update_params->suspend = suspend; + __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + &switch_update_params->changes); + if (suspend) + __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + &switch_update_params->changes); rc = bnx2x_func_state_change(bp, &func_params); @@ -7907,8 +8009,11 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1); if (IS_MF(bp)) { - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); - REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov); + if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) { + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1); + REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8, + bp->mf_ov); + } } bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase); @@ -8300,13 +8405,6 @@ int bnx2x_del_all_macs(struct bnx2x *bp, int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) { - if (is_zero_ether_addr(bp->dev->dev_addr) && - (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { - DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, - "Ignoring Zero MAC for STORAGE SD mode\n"); - return 0; - } - if (IS_PF(bp)) { unsigned long ramrod_flags = 0; @@ -9025,7 +9123,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) struct bnx2x_func_state_params func_params = {NULL}; DP(NETIF_MSG_IFDOWN, - "Hmmm... Unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n"); + "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n"); func_params.f_obj = &bp->func_obj; __set_bit(RAMROD_DRV_CLR_ONLY, @@ -9044,6 +9142,48 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) return 0; } +static void bnx2x_disable_ptp(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + + /* Disable sending PTP packets to host */ + REG_WR(bp, port ? 
NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); + + /* Reset PTP event detection rules */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable the PTP feature */ + REG_WR(bp, port ? NIG_REG_P1_PTP_EN : + NIG_REG_P0_PTP_EN, 0x0); +} + +/* Called during unload, to stop PTP-related stuff */ +void bnx2x_stop_ptp(struct bnx2x *bp) +{ + /* Cancel PTP work queue. Should be done after the Tx queues are + * drained to prevent additional scheduling. + */ + cancel_work_sync(&bp->ptp_task); + + if (bp->ptp_tx_skb) { + dev_kfree_skb_any(bp->ptp_tx_skb); + bp->ptp_tx_skb = NULL; + } + + /* Disable PTP in HW */ + bnx2x_disable_ptp(bp); + + DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n"); +} + void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) { int port = BP_PORT(bp); @@ -9162,6 +9302,13 @@ unload_error: #endif } + /* stop_ptp should be after the Tx queues are drained to prevent + * scheduling to the cancelled PTP work queue. It should also be after + * the function stop ramrod is sent, since as part of this ramrod the FW + * accesses PTP registers. + */ + bnx2x_stop_ptp(bp); + /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); /* Delete all NAPI objects */ @@ -11283,15 +11430,14 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp) dev_info.port_hw_config[port]. fcoe_wwn_node_name_lower); } else if (!IS_MF_SD(bp)) { - /* - * Read the WWN info only if the FCoE feature is enabled for + /* Read the WWN info only if the FCoE feature is enabled for * this function. */ - if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) + if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp)) + bnx2x_get_ext_wwn_info(bp, func); + } else { + if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) bnx2x_get_ext_wwn_info(bp, func); - - } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) { - bnx2x_get_ext_wwn_info(bp, func); } BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); @@ -11329,7 +11475,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp) * In non SD mode features configuration comes from struct * func_ext_config. */ - if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) { + if (!IS_MF_SD(bp)) { u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { val2 = MF_CFG_RD(bp, func_ext_config[func].
@@ -11448,7 +11594,7 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp) memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); - if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) + if (!is_valid_ether_addr(bp->dev->dev_addr)) dev_err(&bp->pdev->dev, "bad Ethernet MAC address configuration: %pM\n" "change it manually before bringing up the appropriate network interface\n", @@ -11478,11 +11624,27 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp) return cfg; } +static void validate_set_si_mode(struct bnx2x *bp) +{ + u8 func = BP_ABS_FUNC(bp); + u32 val; + + val = MF_CFG_RD(bp, func_mf_config[func].mac_upper); + + /* check for legal mac (upper bytes) */ + if (val != 0xffff) { + bp->mf_mode = MULTI_FUNCTION_SI; + bp->mf_config[BP_VN(bp)] = + MF_CFG_RD(bp, func_mf_config[func].config); + } else + BNX2X_DEV_INFO("illegal MAC address for SI\n"); +} + static int bnx2x_get_hwinfo(struct bnx2x *bp) { int /*abs*/func = BP_ABS_FUNC(bp); int vn; - u32 val = 0; + u32 val = 0, val2 = 0; int rc = 0; bnx2x_get_common_hwinfo(bp); @@ -11562,6 +11724,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) bp->mf_ov = 0; bp->mf_mode = 0; + bp->mf_sub_mode = 0; vn = BP_VN(bp); if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { @@ -11591,15 +11754,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) switch (val) { case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: - val = MF_CFG_RD(bp, func_mf_config[func]. - mac_upper); - /* check for legal mac (upper bytes)*/ - if (val != 0xffff) { - bp->mf_mode = MULTI_FUNCTION_SI; - bp->mf_config[vn] = MF_CFG_RD(bp, - func_mf_config[func].config); - } else - BNX2X_DEV_INFO("illegal MAC address for SI\n"); + validate_set_si_mode(bp); break; case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: if ((!CHIP_IS_E1x(bp)) && @@ -11627,9 +11782,33 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) } else BNX2X_DEV_INFO("illegal OV for SD\n"); break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE: + bp->mf_mode = MULTI_FUNCTION_SD; + bp->mf_sub_mode = SUB_MF_MODE_UFP; + bp->mf_config[vn] = + MF_CFG_RD(bp, + func_mf_config[func].config); + break; case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: bp->mf_config[vn] = 0; break; + case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE: + val2 = SHMEM_RD(bp, + dev_info.shared_hw_config.config_3); + val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK; + switch (val2) { + case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5: + validate_set_si_mode(bp); + bp->mf_sub_mode = + SUB_MF_MODE_NPAR1_DOT_5; + break; + default: + /* Unknown configuration */ + bp->mf_config[vn] = 0; + BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n", + val); + } + break; default: /* Unknown configuration: reset mf_config */ bp->mf_config[vn] = 0; @@ -11650,6 +11829,11 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n", func, bp->mf_ov, bp->mf_ov); + } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) { + dev_err(&bp->pdev->dev, + "Unexpected - no valid MF OV for func %d in UFP mode\n", + func); + bp->path_has_ovlan = true; } else { dev_err(&bp->pdev->dev, "No valid MF OV for func %d, aborting\n", @@ -11898,9 +12082,9 @@ static int bnx2x_init_bp(struct bnx2x *bp) dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); bp->disable_tpa = disable_tpa; - bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); + bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp); /* Reduce memory usage in kdump environment by disabling TPA */ - bp->disable_tpa |= reset_devices; + bp->disable_tpa |= is_kdump_kernel(); /* Set TPA flags */ if (bp->disable_tpa) { 
@@ -11918,7 +12102,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->mrrs = mrrs; - bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL; + bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL; if (IS_VF(bp)) bp->rx_ring_size = MAX_RX_AVAIL; @@ -11976,6 +12160,9 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->dump_preset_idx = 1; + if (CHIP_IS_E3B0(bp)) + bp->flags |= PTP_SUPPORTED; + return rc; } @@ -12235,7 +12422,7 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp) bp->rx_mode = rx_mode; /* handle ISCSI SD mode */ - if (IS_MF_ISCSI_SD(bp)) + if (IS_MF_ISCSI_ONLY(bp)) bp->rx_mode = BNX2X_RX_MODE_NONE; /* Schedule the rx_mode command */ @@ -12308,13 +12495,17 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) struct bnx2x *bp = netdev_priv(dev); struct mii_ioctl_data *mdio = if_mii(ifr); - DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", - mdio->phy_id, mdio->reg_num, mdio->val_in); - if (!netif_running(dev)) return -EAGAIN; - return mdio_mii_ioctl(&bp->mdio, mdio, cmd); + switch (cmd) { + case SIOCSHWTSTAMP: + return bnx2x_hwtstamp_ioctl(bp, ifr); + default: + DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n", + mdio->phy_id, mdio->reg_num, mdio->val_in); + return mdio_mii_ioctl(&bp->mdio, mdio, cmd); + } } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -12338,7 +12529,7 @@ static int bnx2x_validate_addr(struct net_device *dev) if (IS_VF(bp)) bnx2x_sample_bulletin(bp); - if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) { + if (!is_valid_ether_addr(dev->dev_addr)) { BNX2X_ERR("Non-valid Ethernet address\n"); return -EADDRNOTAVAIL; } @@ -12958,6 +13149,191 @@ static int set_is_vf(int chip_id) } } +/* nig_tsgen registers relative address */ +#define tsgen_ctrl 0x0 +#define tsgen_freecount 0x10 +#define tsgen_synctime_t0 0x20 +#define tsgen_offset_t0 0x28 +#define tsgen_drift_t0 0x30 +#define tsgen_synctime_t1 0x58 +#define tsgen_offset_t1 0x60 +#define tsgen_drift_t1 0x68 + +/* FW workaround for setting drift */ +static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir, + int best_val, int best_period) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_set_timesync_params *set_timesync_params = + &func_params.params.set_timesync; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; + + /* Function parameters */ + set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET; + set_timesync_params->offset_cmd = TS_OFFSET_KEEP; + set_timesync_params->add_sub_drift_adjust_value = + drift_dir ? 
TS_ADD_VALUE : TS_SUB_VALUE; + set_timesync_params->drift_adjust_value = best_val; + set_timesync_params->drift_adjust_period = best_period; + + return bnx2x_func_state_change(bp, &func_params); +} + +static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + int rc; + int drift_dir = 1; + int val, period, period1, period2, dif, dif1, dif2; + int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0; + + DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); + + if (!netif_running(bp->dev)) { + DP(BNX2X_MSG_PTP, + "PTP adjfreq called while the interface is down\n"); + return -EFAULT; + } + + if (ppb < 0) { + ppb = -ppb; + drift_dir = 0; + } + + if (ppb == 0) { + best_val = 1; + best_period = 0x1FFFFFF; + } else if (ppb >= BNX2X_MAX_PHC_DRIFT) { + best_val = 31; + best_period = 1; + } else { + /* Changed not to allow val = 8, 16, 24 as these values + * are not supported in workaround. + */ + for (val = 0; val <= 31; val++) { + if ((val & 0x7) == 0) + continue; + period1 = val * 1000000 / ppb; + period2 = period1 + 1; + if (period1 != 0) + dif1 = ppb - (val * 1000000 / period1); + else + dif1 = BNX2X_MAX_PHC_DRIFT; + if (dif1 < 0) + dif1 = -dif1; + dif2 = ppb - (val * 1000000 / period2); + if (dif2 < 0) + dif2 = -dif2; + dif = (dif1 < dif2) ? dif1 : dif2; + period = (dif1 < dif2) ? period1 : period2; + if (dif < best_dif) { + best_dif = dif; + best_val = val; + best_period = period; + } + } + } + + rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val, + best_period); + if (rc) { + BNX2X_ERR("Failed to set drift\n"); + return -EFAULT; + } + + DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val, + best_period); + + return 0; +} + +static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 now; + + DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta); + + now = timecounter_read(&bp->timecounter); + now += delta; + /* Re-init the timecounter */ + timecounter_init(&bp->timecounter, &bp->cyclecounter, now); + + return 0; +} + +static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 ns; + u32 remainder; + + ns = timecounter_read(&bp->timecounter); + + DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns); + + ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +static int bnx2x_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + u64 ns; + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns); + + /* Re-init the timecounter */ + timecounter_init(&bp->timecounter, &bp->cyclecounter, ns); + + return 0; +} + +/* Enable (or disable) ancillary features of the phc subsystem */ +static int bnx2x_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); + + BNX2X_ERR("PHC ancillary features are not supported\n"); + return -ENOTSUPP; +} + +void bnx2x_register_phc(struct bnx2x *bp) +{ + /* Fill the ptp_clock_info struct and register PTP clock */ + bp->ptp_clock_info.owner = THIS_MODULE; + snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name); + bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In
PPB */ + bp->ptp_clock_info.n_alarm = 0; + bp->ptp_clock_info.n_ext_ts = 0; + bp->ptp_clock_info.n_per_out = 0; + bp->ptp_clock_info.pps = 0; + bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; + bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; + bp->ptp_clock_info.gettime = bnx2x_ptp_gettime; + bp->ptp_clock_info.settime = bnx2x_ptp_settime; + bp->ptp_clock_info.enable = bnx2x_ptp_enable; + + bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); + if (IS_ERR(bp->ptp_clock)) { + bp->ptp_clock = NULL; + BNX2X_ERR("PTP clock registration failed\n"); + } +} + static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -13129,6 +13505,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, "Unknown", dev->base_addr, bp->pdev->irq, dev->dev_addr); + bnx2x_register_phc(bp); + return 0; init_one_exit: @@ -13155,6 +13533,11 @@ static void __bnx2x_remove(struct pci_dev *pdev, struct bnx2x *bp, bool remove_netdev) { + if (bp->ptp_clock) { + ptp_clock_unregister(bp->ptp_clock); + bp->ptp_clock = NULL; + } + /* Delete storage MAC address */ if (!NO_FCOE(bp)) { rtnl_lock(); @@ -14136,3 +14519,332 @@ int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val) REG_RD(bp, pretend_reg); return 0; } + +static void bnx2x_ptp_task(struct work_struct *work) +{ + struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task); + int port = BP_PORT(bp); + u32 val_seq; + u64 timestamp, ns; + struct skb_shared_hwtstamps shhwtstamps; + + /* Read Tx timestamp registers */ + val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID); + if (val_seq & 0x10000) { + /* There is a valid timestamp value */ + timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB : + NIG_REG_P0_TLLH_PTP_BUF_TS_MSB); + timestamp <<= 32; + timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB : + NIG_REG_P0_TLLH_PTP_BUF_TS_LSB); + /* Reset timestamp register to allow new timestamp */ + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); + ns = timecounter_cyc2time(&bp->timecounter, timestamp); + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(bp->ptp_tx_skb); + bp->ptp_tx_skb = NULL; + + DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); + } else { + DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n"); + /* Reschedule to keep checking for a valid timestamp value */ + schedule_work(&bp->ptp_task); + } +} + +void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb) +{ + int port = BP_PORT(bp); + u64 timestamp, ns; + + timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB : + NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB); + timestamp <<= 32; + timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB : + NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB); + + /* Reset timestamp register to allow new timestamp */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : + NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000); + + ns = timecounter_cyc2time(&bp->timecounter, timestamp); + + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + + DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n", + timestamp, ns); +} + +/* Read the PHC */ +static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc) +{ + struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter); + int port = BP_PORT(bp); + u32 wb_data[2]; + u64 phc_cycles; + + REG_RD_DMAE(bp, port ?
NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 : + NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2); + phc_cycles = wb_data[1]; + phc_cycles = (phc_cycles << 32) + wb_data[0]; + + DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles); + + return phc_cycles; +} + +static void bnx2x_init_cyclecounter(struct bnx2x *bp) +{ + memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter)); + bp->cyclecounter.read = bnx2x_cyclecounter_read; + bp->cyclecounter.mask = CLOCKSOURCE_MASK(64); + bp->cyclecounter.shift = 1; + bp->cyclecounter.mult = 1; +} + +static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp) +{ + struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_func_set_timesync_params *set_timesync_params = + &func_params.params.set_timesync; + + /* Prepare parameters for function state transitions */ + __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); + __set_bit(RAMROD_RETRY, &func_params.ramrod_flags); + + func_params.f_obj = &bp->func_obj; + func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC; + + /* Function parameters */ + set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET; + set_timesync_params->offset_cmd = TS_OFFSET_KEEP; + + return bnx2x_func_state_change(bp, &func_params); +} + +int bnx2x_enable_ptp_packets(struct bnx2x *bp) +{ + struct bnx2x_queue_state_params q_params; + int rc, i; + + /* send queue update ramrod to enable PTP packets */ + memset(&q_params, 0, sizeof(q_params)); + __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); + q_params.cmd = BNX2X_Q_CMD_UPDATE; + __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, + &q_params.params.update.update_flags); + __set_bit(BNX2X_Q_UPDATE_PTP_PKTS, + &q_params.params.update.update_flags); + + /* send the ramrod on all the queues of the PF */ + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + /* Set the appropriate Queue object */ + q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + + /* Update the Queue state */ + rc = bnx2x_queue_state_change(bp, &q_params); + if (rc) { + BNX2X_ERR("Failed to enable PTP packets\n"); + return rc; + } + } + + return 0; +} + +int bnx2x_configure_ptp_filters(struct bnx2x *bp) +{ + int port = BP_PORT(bp); + int rc; + + if (!bp->hwtstamp_ioctl_called) + return 0; + + switch (bp->tx_type) { + case HWTSTAMP_TX_ON: + bp->flags |= TX_TIMESTAMPING_EN; + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE); + break; + case HWTSTAMP_TX_ONESTEP_SYNC: + BNX2X_ERR("One-step timestamping is not supported\n"); + return -ERANGE; + } + + switch (bp->rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + bp->rx_filter = HWTSTAMP_FILTER_NONE; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE); + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */ + REG_WR(bp, port ? 
NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE); + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + /* Initialize PTP detection L2 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF); + + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE); + break; + } + + /* Indicate to FW that this PF expects recorded PTP packets */ + rc = bnx2x_enable_ptp_packets(bp); + if (rc) + return rc; + + /* Enable sending PTP packets to host */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x1); + + return 0; +} + +static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int rc; + + DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n"); + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n", + config.tx_type, config.rx_filter); + + if (config.flags) { + BNX2X_ERR("config.flags is reserved for future use\n"); + return -EINVAL; + } + + bp->hwtstamp_ioctl_called = 1; + bp->tx_type = config.tx_type; + bp->rx_filter = config.rx_filter; + + rc = bnx2x_configure_ptp_filters(bp); + if (rc) + return rc; + + config.rx_filter = bp->rx_filter; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; +} + +/* Configures HW for PTP */ +static int bnx2x_configure_ptp(struct bnx2x *bp) +{ + int rc, port = BP_PORT(bp); + u32 wb_data[2]; + + /* Reset PTP event detection rules - will be configured in the IOCTL */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK : + NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK : + NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK : + NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF); + REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK : + NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable PTP packets to host - will be configured in the IOCTL */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST : + NIG_REG_P0_LLH_PTP_TO_HOST, 0x0); + + /* Enable the PTP feature */ + REG_WR(bp, port ? NIG_REG_P1_PTP_EN : + NIG_REG_P0_PTP_EN, 0x3F); + + /* Enable the free-running counter */ + wb_data[0] = 0; + wb_data[1] = 0; + REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2); + + /* Reset drift register (offset register is not reset) */ + rc = bnx2x_send_reset_timesync_ramrod(bp); + if (rc) { + BNX2X_ERR("Failed to reset PHC drift register\n"); + return -EFAULT; + } + + /* Reset possibly old timestamps */ + REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID : + NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000); + REG_WR(bp, port ?
NIG_REG_P1_TLLH_PTP_BUF_SEQID : + NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000); + + return 0; +} + +/* Called during load, to initialize PTP-related stuff */ +void bnx2x_init_ptp(struct bnx2x *bp) +{ + int rc; + + /* Configure PTP in HW */ + rc = bnx2x_configure_ptp(bp); + if (rc) { + BNX2X_ERR("Stopping PTP initialization\n"); + return; + } + + /* Init work queue for Tx timestamping */ + INIT_WORK(&bp->ptp_task, bnx2x_ptp_task); + + /* Init cyclecounter and timecounter. This is done only in the first + * load. If done in every load, PTP application will fail when doing + * unload / load (e.g. MTU change) while it is running. + */ + if (!bp->timecounter_init_done) { + bnx2x_init_cyclecounter(bp); + timecounter_init(&bp->timecounter, &bp->cyclecounter, + ktime_to_ns(ktime_get_real())); + bp->timecounter_init_done = 1; + } + + DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n"); +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 2beb5430b876..b0779d773343 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -2182,6 +2182,45 @@ #define NIG_REG_P0_HWPFC_ENABLE 0x18078 #define NIG_REG_P0_LLH_FUNC_MEM2 0x18480 #define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE 0x18440 +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID 0x1875c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB 0x18754 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB 0x18758 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0 +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match.
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. + */ +#define NIG_REG_P0_LLH_PTP_RULE_MASK 0x187a4 +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P0_LLH_PTP_TO_HOST 0x187ac /* [RW 1] Input enable for RX MAC interface. */ #define NIG_REG_P0_MAC_IN_EN 0x185ac /* [RW 1] Output enable for TX MAC interface */ @@ -2194,6 +2233,17 @@ * priority field is extracted from the outer-most VLAN in receive packet. * Only COS 0 and COS 1 are supported in E2. */ #define NIG_REG_P0_PKT_PRIORITY_TO_COS 0x18054 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P0_PTP_EN 0x18788
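bnx2x_configure_ptp() above writes 0x3F into this register, i.e. all six enable bits at once: RX and TX TimeSync with both V1 and V2 frame detection. The LLH masks follow the same set-to-disable convention, so each magic number written by bnx2x_configure_ptp_filters() can be checked against the bit lists in these comments. A compile-time sketch of that arithmetic (the bit names are illustrative, not taken from the driver):

/* Illustrative names for the NIG_REG_Px_PTP_EN bits described above. */
#define PTP_EN_RX       (1 << 0)        /* TimeSync on RX */
#define PTP_EN_RX_V1    (1 << 1)        /* V1 frame detection on RX */
#define PTP_EN_RX_V2    (1 << 2)        /* V2 frame detection on RX */
#define PTP_EN_TX       (1 << 3)        /* TimeSync on TX */
#define PTP_EN_TX_V1    (1 << 4)        /* V1 frame detection on TX */
#define PTP_EN_TX_V2    (1 << 5)        /* V2 frame detection on TX */

_Static_assert((PTP_EN_RX | PTP_EN_RX_V1 | PTP_EN_RX_V2 |
                PTP_EN_TX | PTP_EN_TX_V1 | PTP_EN_TX_V2) == 0x3F,
               "0x3F enables every TimeSync feature");

/* Rule masks: a 1 disables a rule, so 0x3FFF (all 14 bits) means "off".
 * 0x3FEE clears bits 0 and 4, i.e. {IPv4 DA 0; UDP DP 0} and
 * {IPv6 DA 0; UDP DP 0}; 0x3EFF clears bit 8 ({MAC DA 0; Ethertype 0})
 * for L2; 0x3EEE clears all three for the combined V2 filter. */
_Static_assert((0x3FFF & ~((1 << 0) | (1 << 4))) == 0x3FEE, "L4 events");
_Static_assert((0x3FFF & ~(1 << 8)) == 0x3EFF, "L2 events");
_Static_assert((0x3FFF & ~((1 << 0) | (1 << 4) | (1 << 8))) == 0x3EEE,
               "L2 + L4 events");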
/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A * priority is mapped to COS 0 when the corresponding mask bit is 1. More * than one bit may be set; allowing multiple priorities to be mapped to one @@ -2300,7 +2350,46 @@ * Ethernet header. */ #define NIG_REG_P1_HDRS_AFTER_BASIC 0x1818c #define NIG_REG_P1_LLH_FUNC_MEM2 0x184c0 -#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460 +#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE 0x18460a +/* [RW 17] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. Bits [15:0] return the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Writing a 1 to bit 16 + * will clear the buffer. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID 0x18774 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB 0x1876c +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * the host. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB 0x18770 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8 +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. Note that rules 0-3 are for IPv4 + * packets only and require that the packet is IPv4 for the rules to match. + * Note that rules 4-7 are for IPv6 packets only and require that the packet + * is IPv6 for the rules to match. + */ +#define NIG_REG_P1_LLH_PTP_RULE_MASK 0x187cc +/* [RW 1] Set to 1 to enable PTP packets to be forwarded to the host. */ +#define NIG_REG_P1_LLH_PTP_TO_HOST 0x187d4 /* [RW 32] Specify the client number to be assigned to each priority of the * strict priority arbiter. This register specifies bits 31:0 of the 36-bit * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0 @@ -2342,6 +2431,17 @@ * priority field is extracted from the outer-most VLAN in receive packet. * Only COS 0 and COS 1 are supported in E2. */ #define NIG_REG_P1_PKT_PRIORITY_TO_COS 0x181a8 +/* [RW 6] Enable for TimeSync feature. Bits [2:0] are for RX side. Bits + * [5:3] are for TX side. Bit 0 enables TimeSync on RX side. Bit 1 enables + * V1 frame format in timesync event detection on RX side. Bit 2 enables V2 + * frame format in timesync event detection on RX side. Bit 3 enables + * TimeSync on TX side. Bit 4 enables V1 frame format in timesync event + * detection on TX side. Bit 5 enables V2 frame format in timesync event + * detection on TX side. Note that for HW to detect PTP packet and extract + * data from the packet, at least one of the version bits of that traffic + * direction has to be enabled. + */ +#define NIG_REG_P1_PTP_EN 0x187b0 /* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A * priority is mapped to COS 0 when the corresponding mask bit is 1. More * than one bit may be set; allowing multiple priorities to be mapped to one @@ -2361,6 +2461,78 @@ #define NIG_REG_P1_RX_MACFIFO_EMPTY 0x1858c /* [R 1] TLLH FIFO is empty. */ #define NIG_REG_P1_TLLH_FIFO_EMPTY 0x18338 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_SEQID 0x187e0 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_LSB 0x187d8 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P0_TLLH_PTP_BUF_TS_MSB 0x187dc +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
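These 1-deep TX-side (TLLH) buffers are what the driver's ptp_task ends up polling for two-step transmit timestamping: wait for the valid bit, assemble the split 64-bit value from the MSB/LSB registers, then write the valid bit back to release the buffer. A sketch of that read-and-clear sequence, with invented reg_rd()/reg_wr() accessors standing in for the driver's REG_RD/REG_WR:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t reg_rd(uint32_t reg);           /* placeholder accessors */
extern void reg_wr(uint32_t reg, uint32_t val);

#define TS_SEQID_VALID  (1u << 16)      /* bit 16: valid, write-1-to-clear */

/* Returns true and fills *ts once the hardware has latched a timestamp. */
static bool poll_tx_timestamp(uint32_t seqid_reg, uint32_t ts_lsb_reg,
                              uint32_t ts_msb_reg, uint64_t *ts)
{
        uint32_t seqid = reg_rd(seqid_reg);

        if (!(seqid & TS_SEQID_VALID))
                return false;   /* nothing captured yet, try again later */

        *ts = ((uint64_t)reg_rd(ts_msb_reg) << 32) | reg_rd(ts_lsb_reg);

        /* writing 1 to the valid bit frees the one-deep FIFO */
        reg_wr(seqid_reg, TS_SEQID_VALID);
        return true;
}

Because the FIFO is one entry deep, a real implementation must clear the buffer before requesting the next timestamped transmission, which is exactly why bnx2x_configure_ptp() writes 0x10000 to the SEQID registers at init time.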
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P0_TLLH_PTP_RULE_MASK 0x187f4 +/* [RW 19] Packet TimeSync information that is buffered in 1-deep FIFOs for + * TX side. Bits [15:0] reflect the sequence ID of the packet. Bit 16 + * indicates the validity of the data in the buffer. Bit 17 indicates that + * the sequence ID is valid and it is waiting for the TX timestamp value. + * Bit 18 indicates whether the timestamp is from a SW request (value of 1) + * or HW request (value of 0). Writing a 1 to bit 16 will clear the buffer. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_SEQID 0x187ec +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the lower 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_LSB 0x187e4 +/* [R 32] Packet TimeSync information that is buffered in 1-deep FIFOs for + * MCP. This location returns the upper 32 bits of timestamp value. + */ +#define NIG_REG_P1_TLLH_PTP_BUF_TS_MSB 0x187e8 +/* [RW 11] Mask register for the various parameters used in determining PTP + * packet presence. Set each bit to 1 to mask out the particular parameter. + * 0-IPv4 DA 0 of 224.0.1.129. 1-IPv4 DA 1 of 224.0.0.107. 2-IPv6 DA 0 of + * 0xFF0*:0:0:0:0:0:0:181. 3-IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B. 4-UDP + * destination port 0 of 319. 5-UDP destination port 1 of 320. 6-MAC + * Ethertype 0 of 0x88F7. 7-configurable MAC Ethertype 1. 8-MAC DA 0 of + * 0x01-1B-19-00-00-00. 9-MAC DA 1 of 0x01-80-C2-00-00-0E. 10-configurable + * MAC DA 2. The reset default is set to mask out all parameters. + */ +#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8 +/* [RW 14] Mask register for the rules used in detecting PTP packets. Set + * each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} . + * 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP + * 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1; + * UDP DP 0} . 7-{IPv6 DA 1; UDP DP 1} . 8-{MAC DA 0; Ethertype 0} . 9-{MAC + * DA 1; Ethertype 0} . 10-{MAC DA 0; Ethertype 1} . 11-{MAC DA 1; Ethertype + * 1} . 12-{MAC DA 2; Ethertype 0} . 13-{MAC DA 2; Ethertype 1} . The reset + * default is to mask out all of the rules. + */ +#define NIG_REG_P1_TLLH_PTP_RULE_MASK 0x187fc /* [RW 32] Specify which of the credit registers the client is to be mapped * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are * for client 0; bits [35:32] are for client 8. For clients that are not @@ -2513,6 +2685,10 @@ swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then port swap is equal to ~nig_registers_port_swap.port_swap */ #define NIG_REG_STRAP_OVERRIDE 0x10398 +/* [WB 64] Addresses for TimeSync related registers in the timesync + * generator sub-module.
+ */ +#define NIG_REG_TIMESYNC_GEN_REG 0x18800 /* [RW 1] output enable for RX_XCM0 IF */ #define NIG_REG_XCM0_OUT_EN 0x100f0 /* [RW 1] output enable for RX_XCM1 IF */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index b1936044767a..7bc2924a7e24 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4019,6 +4019,7 @@ static int bnx2x_setup_rss(struct bnx2x *bp, struct bnx2x_raw_obj *r = &o->raw; struct eth_rss_update_ramrod_data *data = (struct eth_rss_update_ramrod_data *)(r->rdata); + u16 caps = 0; u8 rss_mode = 0; int rc; @@ -4042,28 +4043,34 @@ static int bnx2x_setup_rss(struct bnx2x *bp, /* RSS capabilities */ if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY; if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; + + if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags)) + caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY; + + /* RSS keys */ + if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { + memcpy(&data->rss_key[0], &p->rss_key[0], + sizeof(data->rss_key)); + caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; + } + + data->capabilities = cpu_to_le16(caps); /* Hashing mask */ data->rss_result_mask = p->rss_result_mask; @@ -4084,13 +4091,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp, if (netif_msg_ifup(bp)) bnx2x_debug_print_ind_table(bp, p); - /* RSS keys */ - if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) { - memcpy(&data->rss_key[0], &p->rss_key[0], - sizeof(data->rss_key)); - data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; - } - /* No need for an explicit memory barrier here as long as we * ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory @@ -4336,6 +4336,8 @@ static void bnx2x_q_fill_init_general_data(struct bnx2x *bp, test_bit(BNX2X_Q_FLG_FCOE, flags) ? 
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW; + gen_data->fp_hsi_ver = ETH_FP_HSI_VERSION; + DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n", gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg); } @@ -4357,12 +4359,13 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o, test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); tx_data->force_default_pri_flg = test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); - + tx_data->refuse_outband_vlan_flg = + test_bit(BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, flags); tx_data->tunnel_lso_inc_ip_id = test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags); tx_data->tunnel_non_lso_pcsum_location = - test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT : - PCSUM_ON_BD; + test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? CSUM_ON_PKT : + CSUM_ON_BD; tx_data->tx_status_block_id = params->fw_sb_id; tx_data->tx_sb_index_number = params->sb_cq_index; @@ -4722,6 +4725,12 @@ static void bnx2x_q_fill_update_data(struct bnx2x *bp, data->tx_switching_change_flg = test_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, &params->update_flags); + + /* PTP */ + data->handle_ptp_pkts_flg = + test_bit(BNX2X_Q_UPDATE_PTP_PKTS, &params->update_flags); + data->handle_ptp_pkts_change_flg = + test_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG, &params->update_flags); } static inline int bnx2x_q_send_update(struct bnx2x *bp, @@ -5376,6 +5385,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) next_state = BNX2X_F_STATE_STARTED; + + else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_STARTED; + else if (cmd == BNX2X_F_CMD_TX_STOP) next_state = BNX2X_F_STATE_TX_STOPPED; @@ -5385,6 +5398,10 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp, (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) next_state = BNX2X_F_STATE_TX_STOPPED; + + else if ((cmd == BNX2X_F_CMD_SET_TIMESYNC) && + (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) + next_state = BNX2X_F_STATE_TX_STOPPED; + else if (cmd == BNX2X_F_CMD_TX_START) next_state = BNX2X_F_STATE_STARTED; @@ -5652,9 +5669,27 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp, rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); rdata->path_id = BP_PATH(bp); rdata->network_cos_mode = start_params->network_cos_mode; - rdata->gre_tunnel_mode = start_params->gre_tunnel_mode; - rdata->gre_tunnel_rss = start_params->gre_tunnel_rss; + rdata->tunnel_mode = start_params->tunnel_mode; + rdata->gre_tunnel_type = start_params->gre_tunnel_type; + rdata->inner_gre_rss_en = start_params->inner_gre_rss_en; + rdata->vxlan_dst_port = cpu_to_le16(4789); + rdata->sd_accept_mf_clss_fail = start_params->class_fail; + if (start_params->class_fail_ethtype) { + rdata->sd_accept_mf_clss_fail_match_ethtype = 1; + rdata->sd_accept_mf_clss_fail_ethtype = + cpu_to_le16(start_params->class_fail_ethtype); + } + rdata->sd_vlan_force_pri_flg = start_params->sd_vlan_force_pri; + rdata->sd_vlan_force_pri_val = start_params->sd_vlan_force_pri_val; + if (start_params->sd_vlan_eth_type) + rdata->sd_vlan_eth_type = + cpu_to_le16(start_params->sd_vlan_eth_type); + else + rdata->sd_vlan_eth_type = + cpu_to_le16(0x8100); + + rdata->no_added_tags = start_params->no_added_tags; /* No need for an explicit memory barrier here as long as we would * need to ensure the ordering of writing to the SPQ element * and updating of the SPQ producer which involves a memory @@ -5680,8 +5715,52 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp, memset(rdata, 0, sizeof(*rdata)); /* Fill
the ramrod data with provided parameters */ - rdata->tx_switch_suspend_change_flg = 1; - rdata->tx_switch_suspend = switch_update_params->suspend; + if (test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + &switch_update_params->changes)) { + rdata->tx_switch_suspend_change_flg = 1; + rdata->tx_switch_suspend = + test_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + &switch_update_params->changes); + } + + if (test_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, + &switch_update_params->changes)) { + rdata->sd_vlan_tag_change_flg = 1; + rdata->sd_vlan_tag = + cpu_to_le16(switch_update_params->vlan); + } + + if (test_bit(BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG, + &switch_update_params->changes)) { + rdata->sd_vlan_eth_type_change_flg = 1; + rdata->sd_vlan_eth_type = + cpu_to_le16(switch_update_params->vlan_eth_type); + } + + if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, + &switch_update_params->changes)) { + rdata->sd_vlan_force_pri_change_flg = 1; + if (test_bit(BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, + &switch_update_params->changes)) + rdata->sd_vlan_force_pri_flg = 1; + rdata->sd_vlan_force_pri_flg = + switch_update_params->vlan_force_prio; + } + + if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + &switch_update_params->changes)) { + rdata->update_tunn_cfg_flg = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + &switch_update_params->changes)) + rdata->tunn_clss_en = 1; + if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, + &switch_update_params->changes)) + rdata->inner_gre_rss_en = 1; + rdata->tunnel_mode = switch_update_params->tunnel_mode; + rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type; + rdata->vxlan_dst_port = cpu_to_le16(4789); + } + rdata->echo = SWITCH_UPDATE; /* No need for an explicit memory barrier here as long as we @@ -5817,6 +5896,42 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp, U64_LO(data_mapping), NONE_CONNECTION_TYPE); } +static inline +int bnx2x_func_send_set_timesync(struct bnx2x *bp, + struct bnx2x_func_state_params *params) +{ + struct bnx2x_func_sp_obj *o = params->f_obj; + struct set_timesync_ramrod_data *rdata = + (struct set_timesync_ramrod_data *)o->rdata; + dma_addr_t data_mapping = o->rdata_mapping; + struct bnx2x_func_set_timesync_params *set_timesync_params = + &params->params.set_timesync; + + memset(rdata, 0, sizeof(*rdata)); + + /* Fill the ramrod data with provided parameters */ + rdata->drift_adjust_cmd = set_timesync_params->drift_adjust_cmd; + rdata->offset_cmd = set_timesync_params->offset_cmd; + rdata->add_sub_drift_adjust_value = + set_timesync_params->add_sub_drift_adjust_value; + rdata->drift_adjust_value = set_timesync_params->drift_adjust_value; + rdata->drift_adjust_period = set_timesync_params->drift_adjust_period; + rdata->offset_delta.lo = + cpu_to_le32(U64_LO(set_timesync_params->offset_delta)); + rdata->offset_delta.hi = + cpu_to_le32(U64_HI(set_timesync_params->offset_delta)); + + DP(BNX2X_MSG_SP, "Set timesync command params: drift_cmd = %d, offset_cmd = %d, add_sub_drift = %d, drift_val = %d, drift_period = %d, offset_lo = %d, offset_hi = %d\n", + rdata->drift_adjust_cmd, rdata->offset_cmd, + rdata->add_sub_drift_adjust_value, rdata->drift_adjust_value, + rdata->drift_adjust_period, rdata->offset_delta.lo, + rdata->offset_delta.hi); + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_TIMESYNC, 0, + U64_HI(data_mapping), + U64_LO(data_mapping), NONE_CONNECTION_TYPE); +} + static int bnx2x_func_send_cmd(struct
bnx2x *bp, return bnx2x_func_send_tx_start(bp, params); case BNX2X_F_CMD_SWITCH_UPDATE: return bnx2x_func_send_switch_update(bp, params); + case BNX2X_F_CMD_SET_TIMESYNC: + return bnx2x_func_send_set_timesync(bp, params); default: BNX2X_ERR("Unknown command: %d\n", params->cmd); return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 718ecd294661..e97275f456c0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -711,6 +711,7 @@ enum { BNX2X_RSS_IPV6, BNX2X_RSS_IPV6_TCP, BNX2X_RSS_IPV6_UDP, + BNX2X_RSS_GRE_INNER_HDRS, }; struct bnx2x_config_rss_params { @@ -769,7 +770,9 @@ enum { BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG, BNX2X_Q_UPDATE_SILENT_VLAN_REM, BNX2X_Q_UPDATE_TX_SWITCHING_CHNG, - BNX2X_Q_UPDATE_TX_SWITCHING + BNX2X_Q_UPDATE_TX_SWITCHING, + BNX2X_Q_UPDATE_PTP_PKTS_CHNG, + BNX2X_Q_UPDATE_PTP_PKTS, }; /* Allowed Queue states */ @@ -831,6 +834,7 @@ enum { BNX2X_Q_FLG_ANTI_SPOOF, BNX2X_Q_FLG_SILENT_VLAN_REM, BNX2X_Q_FLG_FORCE_DEFAULT_PRI, + BNX2X_Q_FLG_REFUSE_OUTBAND_VLAN, BNX2X_Q_FLG_PCSUM_ON_PKT, BNX2X_Q_FLG_TUN_INC_INNER_IP_ID }; @@ -851,6 +855,10 @@ enum bnx2x_q_type { #define BNX2X_MULTI_TX_COS 3 /* Maximum possible */ #define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN) +/* DMAE channel to be used by FW for timesync workaround. A driver that sends + * timesync-related ramrods must not use this DMAE command ID. + */ +#define FW_DMAE_CMD_ID 6 struct bnx2x_queue_init_params { struct { @@ -1085,6 +1093,20 @@ struct bnx2x_queue_sp_obj { }; /********************** Function state update *********************************/ + +/* UPDATE command options */ +enum { + BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG, + BNX2X_F_UPDATE_TX_SWITCH_SUSPEND, + BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG, + BNX2X_F_UPDATE_SD_VLAN_ETH_TYPE_CHNG, + BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG, + BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG, + BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, + BNX2X_F_UPDATE_TUNNEL_CLSS_EN, + BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN, +}; + /* Allowed Function states */ enum bnx2x_func_state { BNX2X_F_STATE_RESET, @@ -1105,6 +1127,7 @@ enum bnx2x_func_cmd { BNX2X_F_CMD_TX_STOP, BNX2X_F_CMD_TX_START, BNX2X_F_CMD_SWITCH_UPDATE, + BNX2X_F_CMD_SET_TIMESYNC, BNX2X_F_CMD_MAX, }; @@ -1146,18 +1169,44 @@ struct bnx2x_func_start_params { /* Function cos mode */ u8 network_cos_mode; - /* NVGRE classification enablement */ - u8 nvgre_clss_en; + /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */ + u8 tunnel_mode; - /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ - u8 gre_tunnel_mode; + /* tunneling classification enablement */ + u8 tunn_clss_en; + + /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */ + u8 gre_tunnel_type; + + /* Enables Inner GRE RSS on the function, depends on the client RSS + * capabilities + */ + u8 inner_gre_rss_en; + + /* Allows accepting of packets failing MF classification, possibly + * only matching a given ethertype + */ + u8 class_fail; + u16 class_fail_ethtype; - /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */ - u8 gre_tunnel_rss; + /* Override priority of output packets */ + u8 sd_vlan_force_pri; + u8 sd_vlan_force_pri_val; + + /* Replace vlan's ethertype */ + u16 sd_vlan_eth_type; + + /* Prevent inner vlans from being added by FW */ + u8 no_added_tags; }; struct bnx2x_func_switch_update_params { - u8 suspend; + unsigned long changes; /* BNX2X_F_UPDATE_XX bits */ + u16 vlan; + u16 vlan_eth_type; + u8 vlan_force_prio; + u8 tunnel_mode; + u8
gre_tunnel_type; }; struct bnx2x_func_afex_update_params { @@ -1172,6 +1221,7 @@ struct bnx2x_func_afex_viflists_params { u8 afex_vif_list_command; u8 func_to_clear; }; + struct bnx2x_func_tx_start_params { struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; u8 dcb_enabled; @@ -1179,6 +1229,24 @@ struct bnx2x_func_tx_start_params { u8 dont_add_pri_0_en; }; +struct bnx2x_func_set_timesync_params { + /* Reset, set or keep the current drift value */ + u8 drift_adjust_cmd; + + /* Dec, inc or keep the current offset */ + u8 offset_cmd; + + /* Drift value direction */ + u8 add_sub_drift_adjust_value; + + /* Drift, period and offset values to be used according to the commands + * above. + */ + u8 drift_adjust_value; + u32 drift_adjust_period; + u64 offset_delta; +}; + struct bnx2x_func_state_params { struct bnx2x_func_sp_obj *f_obj; @@ -1197,6 +1265,7 @@ struct bnx2x_func_state_params { struct bnx2x_func_afex_update_params afex_update; struct bnx2x_func_afex_viflists_params afex_viflists; struct bnx2x_func_tx_start_params tx_start; + struct bnx2x_func_set_timesync_params set_timesync; } params; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 662310c5f4e9..c88b20af87df 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1125,7 +1125,7 @@ static int bnx2x_ari_enabled(struct pci_dev *dev) return dev->bus->self && dev->bus->self->ari_enabled; } -static void +static int bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) { int sb_id; @@ -1150,6 +1150,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)); } DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool); + return BP_VFDB(bp)->vf_sbs_pool; } static void __bnx2x_iov_free_vfdb(struct bnx2x *bp) @@ -1314,15 +1315,17 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, } /* re-read the IGU CAM for VFs - index and abs_vfid must be set */ - bnx2x_get_vf_igu_cam_info(bp); + if (!bnx2x_get_vf_igu_cam_info(bp)) { + BNX2X_ERR("No entries in IGU CAM for vfs\n"); + err = -EINVAL; + goto failed; + } /* allocate the queue arrays for all VFs */ bp->vfdb->vfqs = kzalloc( BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue), GFP_KERNEL); - DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs); - if (!bp->vfdb->vfqs) { BNX2X_ERR("failed to allocate vf queue array\n"); err = -ENOMEM; @@ -1349,9 +1352,7 @@ void bnx2x_iov_remove_one(struct bnx2x *bp) if (!IS_SRIOV(bp)) return; - DP(BNX2X_MSG_IOV, "about to call disable sriov\n"); - pci_disable_sriov(bp->pdev); - DP(BNX2X_MSG_IOV, "sriov disabled\n"); + bnx2x_disable_sriov(bp); /* disable access to all VFs */ for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) { @@ -1985,21 +1986,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count; } -static inline -struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id) -{ - int i; - struct bnx2x_virtf *vf = NULL; - - for_each_vf(bp, i) { - vf = BP_VF(bp, i); - if (stat_id >= vf->igu_base_id && - stat_id < vf->igu_base_id + vf_sb_count(vf)) - break; - } - return vf; -} - /* VF API helpers */ static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid, u8 enable) @@ -2362,12 +2348,6 @@ int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf) return rc; } -static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp, - struct bnx2x_virtf *vf, u32 *sbdf) -{ - *sbdf 
= vf->devfn | (vf->bus << 8); -} - void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, enum channel_tlvs tlv) { @@ -2416,7 +2396,7 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, /* log the unlock */ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n", - vf->abs_vfid, vf->op_current); + vf->abs_vfid, current_tlv); } static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable) @@ -2501,7 +2481,7 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param) bp->requested_nr_virtfn = num_vfs_param; if (num_vfs_param == 0) { bnx2x_set_pf_tx_switching(bp, false); - pci_disable_sriov(dev); + bnx2x_disable_sriov(bp); return 0; } else { return bnx2x_enable_sriov(bp); @@ -2614,6 +2594,12 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) void bnx2x_disable_sriov(struct bnx2x *bp) { + if (pci_vfs_assigned(bp->pdev)) { + DP(BNX2X_MSG_IOV, + "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return; + } + pci_disable_sriov(bp->pdev); } @@ -2628,7 +2614,7 @@ static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx, } if (!IS_SRIOV(bp)) { - BNX2X_ERR("sriov is disabled - can't utilize iov-realted functionality\n"); + BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index ca1055f3d8af..01bafa4ac045 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -299,7 +299,8 @@ struct bnx2x_vfdb { #define BP_VFDB(bp) ((bp)->vfdb) /* vf array */ struct bnx2x_virtf *vfs; -#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[idx])) +#define BP_VF(bp, idx) ((BP_VFDB(bp) && (bp)->vfdb->vfs) ? 
\ + &((bp)->vfdb->vfs[idx]) : NULL) #define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var) /* queue array - for all vfs */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index ca47665f94bf..d1608297c773 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -137,7 +137,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp) cpu_to_le16(bp->stats_counter++); DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", - bp->fw_stats_req->hdr.drv_stats_counter); + le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter)); /* adjust the ramrod to include VF queues statistics */ bnx2x_iov_adjust_stats_req(bp); @@ -200,7 +200,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } } -static int bnx2x_stats_comp(struct bnx2x *bp) +static void bnx2x_stats_comp(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); int cnt = 10; @@ -214,7 +214,6 @@ static int bnx2x_stats_comp(struct bnx2x *bp) cnt--; usleep_range(1000, 2000); } - return 1; } /* @@ -1630,6 +1629,11 @@ void bnx2x_stats_init(struct bnx2x *bp) int /*abs*/port = BP_PORT(bp); int mb_idx = BP_FW_MB_IDX(bp); + if (IS_VF(bp)) { + bnx2x_memset_stats(bp); + return; + } + bp->stats_pending = 0; bp->executer_idx = 0; bp->stats_counter = 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 54e0427a9ee6..b1d9c44aa56c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -583,7 +583,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, flags |= VFPF_QUEUE_FLG_STATS; flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; flags |= VFPF_QUEUE_FLG_VLAN; - DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); /* Common */ req->vf_qid = fp_idx; @@ -952,14 +951,6 @@ static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid) REG_WR8(bp, addr, 1); } -static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp) -{ - int i; - - for_each_vf(bp, i) - storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid)); -} - /* enable vf_pf mailbox (aka vf-pf-channel) */ void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid) { diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index a6a9f284c8dd..23f23c97c2ad 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -383,7 +383,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, break; rcu_read_lock(); - if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) { + if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) { rc = -ENODEV; rcu_read_unlock(); break; @@ -527,7 +527,7 @@ int cnic_unregister_driver(int ulp_type) list_for_each_entry(dev, &cnic_dev_list, list) { struct cnic_local *cp = dev->cnic_priv; - if (rcu_dereference(cp->ulp_ops[ulp_type])) { + if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { pr_err("%s: Type %d still has devices registered\n", __func__, ulp_type); read_unlock(&cnic_dev_lock); @@ -575,7 +575,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, mutex_unlock(&cnic_lock); return -EAGAIN; } - if (rcu_dereference(cp->ulp_ops[ulp_type])) { + if (rcu_access_pointer(cp->ulp_ops[ulp_type])) { pr_err("%s: Type %d has already been registered to this device\n", __func__, ulp_type); mutex_unlock(&cnic_lock); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 
5cc9cae21ed5..e8c601d26c64 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2439,6 +2439,13 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT, major, (reg >> 16) & 0x0f, reg & 0xffff); + /* Store the integrated PHY revision for the MDIO probing function + * to pass this information to the PHY driver. The PHY driver expects + * to find the PHY major revision in bits 15:8 while the GENET register + * stores that information in bits 7:0, account for that. + */ + priv->gphy_rev = (reg & 0xffff) << 8; + #ifdef CONFIG_PHYS_ADDR_T_64BIT if (!(params->flags & GENET_HAS_40BITS)) pr_warn("GENET does not support 40-bits PA\n"); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index c862d0666771..ad95fe57ebcd 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -545,6 +545,7 @@ struct bcmgenet_priv { struct phy_device *phydev; struct device_node *phy_dn; struct mii_bus *mii_bus; + u16 gphy_rev; /* PHY device variables */ int old_duplex; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index c88f7ae99636..75b26cbaa7c1 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -296,7 +296,7 @@ static int bcmgenet_mii_probe(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct device_node *dn = priv->pdev->dev.of_node; struct phy_device *phydev; - unsigned int phy_flags; + u32 phy_flags; int ret; if (priv->phydev) { @@ -315,8 +315,11 @@ static int bcmgenet_mii_probe(struct net_device *dev) priv->phy_dn = of_node_get(dn); } - phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0, - priv->phy_interface); + /* Communicate the integrated PHY revision */ + phy_flags = priv->gphy_rev; + + phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, + phy_flags, priv->phy_interface); if (!phydev) { pr_err("could not attach to PHY\n"); return -ENODEV; @@ -338,15 +341,6 @@ static int bcmgenet_mii_probe(struct net_device *dev) return ret; } - phy_flags = PHY_BRCM_100MBPS_WAR; - - /* workarounds are only needed for 100Mbps PHYs, and - * never on GENET V1 hardware - */ - if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv)) - phy_flags = 0; - - phydev->dev_flags |= phy_flags; phydev->advertising = phydev->supported; /* The internal PHY has its link interrupts routed to the diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 13f9636cdba7..903466ef41c0 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -107,7 +107,8 @@ bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport, { struct bfi_enet_enable_req *admin_req = &ethport->bfi_enet_cmd.admin_req; - struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr; + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); switch (admin_req->enable) { case BNA_STATUS_T_ENABLED: @@ -133,7 +134,8 @@ bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport, { struct bfi_enet_diag_lb_req *diag_lb_req = &ethport->bfi_enet_cmd.lpbk_req; - struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr; + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); switch (diag_lb_req->enable) { case BNA_STATUS_T_ENABLED: @@ -161,7 +163,8 @@
static void bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth, struct bfi_msgq_mhdr *msghdr) { - struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr; + struct bfi_enet_attr_rsp *rsp = + container_of(msghdr, struct bfi_enet_attr_rsp, mh); /** * Store only if not set earlier, since BNAD can override the HW diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 85e63546abe3..8ee3fdcc17cd 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -715,7 +715,7 @@ bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) { struct bfi_enet_rsp *rsp = - (struct bfi_enet_rsp *)msghdr; + container_of(msghdr, struct bfi_enet_rsp, mh); if (rsp->error) { /* Clear ucast from cache */ @@ -732,7 +732,7 @@ bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, struct bfi_enet_mcast_add_req *req = &rxf->bfi_enet_cmd.mcast_add_req; struct bfi_enet_mcast_add_rsp *rsp = - (struct bfi_enet_mcast_add_rsp *)msghdr; + container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh); bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr, ntohs(rsp->handle)); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ca5d7798b265..d9b8e94b805f 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2241,9 +2241,9 @@ static int __init macb_probe(struct platform_device *pdev) netif_carrier_off(dev); - netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n", - macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr, - dev->irq, dev->dev_addr); + netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", + macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), + dev->base_addr, dev->irq, dev->dev_addr); phydev = bp->phy_dev; netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e5be511a3c38..eeec49540233 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -283,6 +283,7 @@ static const struct pci_device_id cxgb4_pci_tbl[] = { CH_DEVICE(0x5083, 4), CH_DEVICE(0x5084, 4), CH_DEVICE(0x5085, 4), + CH_DEVICE(0x5086, 4), CH_DEVICE(0x5401, 4), CH_DEVICE(0x5402, 4), CH_DEVICE(0x5403, 4), @@ -310,6 +311,7 @@ static const struct pci_device_id cxgb4_pci_tbl[] = { CH_DEVICE(0x5483, 4), CH_DEVICE(0x5484, 4), CH_DEVICE(0x5485, 4), + CH_DEVICE(0x5486, 4), { 0, } }; @@ -4390,7 +4392,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this, * bond. We need to find such different adapters and add clip * in all of them only once. 
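The bna conversions above replace raw casts of msghdr with container_of(), which recovers the enclosing response structure from a pointer to its embedded header and keeps working even when the header is not the first member. A standalone illustration with made-up layouts (the real bfi structures differ):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* toy stand-ins for the bfi types */
struct bfi_msgq_mhdr { unsigned char msg_class, msg_id; };
struct bfi_enet_rsp {
        int error;                      /* deliberately placed before mh */
        struct bfi_msgq_mhdr mh;
};

int main(void)
{
        struct bfi_enet_rsp rsp = { .error = -22 };
        struct bfi_msgq_mhdr *msghdr = &rsp.mh;

        /* a plain (struct bfi_enet_rsp *)msghdr cast would point at rsp.mh,
         * not at rsp; container_of() subtracts offsetof(..., mh) instead */
        struct bfi_enet_rsp *back = container_of(msghdr, struct bfi_enet_rsp, mh);

        printf("error = %d\n", back->error);    /* prints -22 */
        return 0;
}

The kernel's container_of() additionally type-checks the pointer argument, but the pointer arithmetic is exactly this.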
*/ - read_lock(&bond->lock); bond_for_each_slave(bond, slave, iter) { if (!first_pdev) { ret = clip_add(slave->dev, ifa, event); @@ -4404,7 +4405,6 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this, to_pci_dev(slave->dev->dev.parent)) ret = clip_add(slave->dev, ifa, event); } - read_unlock(&bond->lock); } else ret = clip_add(ifa->idev->dev, ifa, event); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 41d04462b72e..22d7581341a9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -1099,6 +1099,9 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) { int ret = 0; + if (end >= adapter->params.sf_nsec) + return -EINVAL; + while (start <= end) { if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || (ret = sf1_write(adapter, 4, 0, 1, @@ -3850,8 +3853,20 @@ int t4_wait_dev_ready(struct adapter *adap) return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; } +struct flash_desc { + u32 vendor_and_model_id; + u32 size_mb; +}; + static int get_flash_params(struct adapter *adap) { + /* Table for non-Numonix supported flash parts. Numonix parts are left + * to the preexisting code. All flash parts have 64KB sectors. + */ + static struct flash_desc supported_flash[] = { + { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ + }; + int ret; u32 info; @@ -3862,6 +3877,14 @@ static int get_flash_params(struct adapter *adap) if (ret) return ret; + for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret) + if (supported_flash[ret].vendor_and_model_id == info) { + adap->params.sf_size = supported_flash[ret].size_mb; + adap->params.sf_nsec = + adap->params.sf_size / SF_SEC_SIZE; + return 0; + } + if ((info & 0xff) != 0x20) /* not a Numonix flash */ return -EINVAL; info >>= 16; /* log2 of size */ @@ -3874,6 +3897,10 @@ static int get_flash_params(struct adapter *adap) adap->params.sf_size = 1 << info; adap->params.sf_fw_start = t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK; + + if (adap->params.sf_size < FLASH_MIN_SIZE) + dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n", + adap->params.sf_size, FLASH_MIN_SIZE); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 35e3d8e32881..6833a7b02137 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -175,7 +175,7 @@ enum { * Location of firmware image in FLASH. */ FLASH_FW_START_SEC = 8, - FLASH_FW_NSECS = 8, + FLASH_FW_NSECS = 16, FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), @@ -206,6 +206,12 @@ enum { FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS), + /* We don't support FLASH devices which can't support the full + * standard set of sections which we need for normal + * operations. 
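get_flash_params() above now consults a small table of known parts before falling back to the generic Numonix ID decode, and the new FLASH_MIN_SIZE check turns an undersized device into a loud warning rather than a silent misconfiguration. The lookup shape is easy to exercise in isolation; a sketch using the one table entry from the hunk (the helper name is invented and the Numonix fallback is simplified to the fragment shown above):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct flash_desc {
        uint32_t vendor_and_model_id;
        uint32_t size_mb;       /* despite the name, stored in bytes */
};

static const struct flash_desc supported_flash[] = {
        { 0x150201, 4 << 20 },  /* Spansion 4MB S25FL032P */
};

#define SF_SEC_SIZE     (64 * 1024)     /* all supported parts: 64KB sectors */

/* Returns the flash size in bytes, or 0 for an unrecognized part. */
static uint32_t flash_size(uint32_t info)
{
        for (size_t i = 0;
             i < sizeof(supported_flash) / sizeof(supported_flash[0]); i++)
                if (supported_flash[i].vendor_and_model_id == info)
                        return supported_flash[i].size_mb;

        if ((info & 0xff) != 0x20)      /* not a Numonix flash */
                return 0;
        return 1u << (info >> 16);      /* upper bits carry log2 of the size */
}

int main(void)
{
        uint32_t sz = flash_size(0x150201);

        printf("S25FL032P: %u bytes, %u sectors\n", sz, sz / SF_SEC_SIZE);
        return 0;
}

Dividing by SF_SEC_SIZE mirrors how the driver derives sf_nsec, which is also what the new bounds check in t4_flash_erase_sectors() validates against.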
+ */ + FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE, + FLASH_FPGA_CFG_START_SEC = 15, FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC), diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2102a4c91737..82534031cada 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2912,7 +2912,6 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) static const struct pci_device_id cxgb4vf_pci_tbl[] = { CH_DEVICE(0xb000, 0), /* PE10K FPGA */ - CH_DEVICE(0x4800, 0), /* T440-dbg */ CH_DEVICE(0x4801, 0), /* T420-cr */ CH_DEVICE(0x4802, 0), /* T422-cr */ CH_DEVICE(0x4803, 0), /* T440-cr */ @@ -2934,7 +2933,6 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = { CH_DEVICE(0x4880, 6), CH_DEVICE(0x4880, 7), CH_DEVICE(0x4880, 8), - CH_DEVICE(0x5800, 0), /* T580-dbg */ CH_DEVICE(0x5801, 0), /* T520-cr */ CH_DEVICE(0x5802, 0), /* T522-cr */ CH_DEVICE(0x5803, 0), /* T540-cr */ @@ -2962,6 +2960,7 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = { CH_DEVICE(0x5883, 0), CH_DEVICE(0x5884, 0), CH_DEVICE(0x5885, 0), + CH_DEVICE(0x5886, 0), { 0, } }; diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 962510f391df..5ba5ad071bb6 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -186,6 +186,7 @@ struct enic { ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; unsigned int cq_count; struct enic_rfs_flw_tbl rfs_h; + u32 rx_copybreak; }; static inline struct device *enic_get_dev(struct enic *enic) diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 523c9ceb04c0..85173d620758 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -379,6 +379,43 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } +static int enic_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct enic *enic = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = enic->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int enic_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct enic *enic = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + enic->rx_copybreak = *(u32 *)data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + static const struct ethtool_ops enic_ethtool_ops = { .get_settings = enic_get_settings, .get_drvinfo = enic_get_drvinfo, @@ -391,6 +428,8 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_coalesce = enic_get_coalesce, .set_coalesce = enic_set_coalesce, .get_rxnfc = enic_get_rxnfc, + .get_tunable = enic_get_tunable, + .set_tunable = enic_set_tunable, }; void enic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index c8832bc1c5f7..929bfe70080a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -66,6 +66,8 @@ #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ +#define RX_COPYBREAK_DEFAULT 256 + /* 
Supported devices */ static const struct pci_device_id enic_id_table[] = { { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, @@ -924,6 +926,7 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(buf->os_buf); + buf->os_buf = NULL; } static int enic_rq_alloc_buf(struct vnic_rq *rq) @@ -934,7 +937,24 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) unsigned int len = netdev->mtu + VLAN_ETH_HLEN; unsigned int os_buf_index = 0; dma_addr_t dma_addr; + struct vnic_rq_buf *buf = rq->to_use; + + if (buf->os_buf) { + buf = buf->next; + rq->to_use = buf; + rq->ring.desc_avail--; + if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) { + /* Adding write memory barrier prevents compiler and/or + * CPU reordering, thus avoiding descriptor posting + * before descriptor is initialized. Otherwise, hardware + * can read stale descriptor fields. + */ + wmb(); + iowrite32(buf->index, &rq->ctrl->posted_index); + } + return 0; + } skb = netdev_alloc_skb_ip_align(netdev, len); if (!skb) return -ENOMEM; @@ -957,6 +977,25 @@ static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size, pkt_size->small_pkt_bytes_cnt += pkt_len; } +static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb, + struct vnic_rq_buf *buf, u16 len) +{ + struct enic *enic = netdev_priv(netdev); + struct sk_buff *new_skb; + + if (len > enic->rx_copybreak) + return false; + new_skb = netdev_alloc_skb_ip_align(netdev, len); + if (!new_skb) + return false; + pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len, + DMA_FROM_DEVICE); + memcpy(new_skb->data, (*skb)->data, len); + *skb = new_skb; + + return true; +} + static void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped, void *opaque) @@ -978,9 +1017,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, return; skb = buf->os_buf; - prefetch(skb->data - NET_IP_ALIGN); - pci_unmap_single(enic->pdev, buf->dma_addr, - buf->len, PCI_DMA_FROMDEVICE); cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &type, &color, &q_number, &completed_index, @@ -1011,6 +1047,13 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, /* Good receive */ + if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) { + buf->os_buf = NULL; + pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, + PCI_DMA_FROMDEVICE); + } + prefetch(skb->data - NET_IP_ALIGN); + skb_put(skb, bytes_written); skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, q_number); @@ -2531,6 +2574,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(dev, "Cannot register net device, aborting\n"); goto err_out_dev_deinit; } + enic->rx_copybreak = RX_COPYBREAK_DEFAULT; return 0; diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 37472ce4fac3..62f7b7baf93c 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -847,8 +847,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) */ if ((err == ERR_ECMDUNKNOWN) || (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) { - pr_warning("Using default conversion factor for " - "interrupt coalesce timer\n"); + pr_warn("Using default conversion factor for interrupt coalesce timer\n"); vnic_dev_intr_coal_timer_info_default(vdev); return 0; } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c 
b/drivers/net/ethernet/dec/tulip/dmfe.c index 322213d901d5..c8205606c775 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -328,10 +328,10 @@ static void allocate_rx_buffer(struct net_device *); static void update_cr6(u32, void __iomem *); static void send_filter_frame(struct DEVICE *); static void dm9132_id_table(struct DEVICE *); -static u16 phy_read(void __iomem *, u8, u8, u32); -static void phy_write(void __iomem *, u8, u8, u16, u32); -static void phy_write_1bit(void __iomem *, u32); -static u16 phy_read_1bit(void __iomem *); +static u16 dmfe_phy_read(void __iomem *, u8, u8, u32); +static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32); +static void dmfe_phy_write_1bit(void __iomem *, u32); +static u16 dmfe_phy_read_1bit(void __iomem *); static u8 dmfe_sense_speed(struct dmfe_board_info *); static void dmfe_process_mode(struct dmfe_board_info *); static void dmfe_timer(unsigned long); @@ -770,7 +770,7 @@ static int dmfe_stop(struct DEVICE *dev) /* Reset & stop DM910X board */ dw32(DCR0, DM910X_RESET); udelay(100); - phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); + dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); /* free interrupt */ free_irq(db->pdev->irq, dev); @@ -1154,7 +1154,7 @@ static void dmfe_timer(unsigned long data) if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { db->cr6_data &= ~0x40000; update_cr6(db->cr6_data, ioaddr); - phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); + dmfe_phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); db->cr6_data |= 0x40000; update_cr6(db->cr6_data, ioaddr); db->timer.expires = DMFE_TIMER_WUT + HZ * 2; @@ -1230,9 +1230,9 @@ static void dmfe_timer(unsigned long data) */ /* need a dummy read because of PHY's register latch*/ - phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); - link_ok_phy = (phy_read (db->ioaddr, - db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0; + dmfe_phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); + link_ok_phy = (dmfe_phy_read (db->ioaddr, + db->phy_addr, 1, db->chip_id) & 0x4) ? 
1 : 0; if (link_ok_phy != link_ok) { DMFE_DBUG (0, "PHY and chip report different link status", 0); @@ -1247,8 +1247,8 @@ static void dmfe_timer(unsigned long data) /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ /* AUTO or force 1M Homerun/Longrun don't need */ if ( !(db->media_mode & 0x38) ) - phy_write(db->ioaddr, db->phy_addr, - 0, 0x1000, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, + 0, 0x1000, db->chip_id); /* AUTO mode, if INT phyxcer link failed, select EXT device */ if (db->media_mode & DMFE_AUTO) { @@ -1649,16 +1649,16 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *db) /* CR6 bit18=0, select 10/100M */ update_cr6(db->cr6_data & ~0x40000, ioaddr); - phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); - phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); + phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); + phy_mode = dmfe_phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); if ( (phy_mode & 0x24) == 0x24 ) { if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ - phy_mode = phy_read(db->ioaddr, - db->phy_addr, 7, db->chip_id) & 0xf000; + phy_mode = dmfe_phy_read(db->ioaddr, + db->phy_addr, 7, db->chip_id) & 0xf000; else /* DM9102/DM9102A */ - phy_mode = phy_read(db->ioaddr, - db->phy_addr, 17, db->chip_id) & 0xf000; + phy_mode = dmfe_phy_read(db->ioaddr, + db->phy_addr, 17, db->chip_id) & 0xf000; switch (phy_mode) { case 0x1000: db->op_mode = DMFE_10MHF; break; case 0x2000: db->op_mode = DMFE_10MFD; break; @@ -1695,15 +1695,15 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db) /* DM9009 Chip: Phyxcer reg18 bit12=0 */ if (db->chip_id == PCI_DM9009_ID) { - phy_reg = phy_read(db->ioaddr, - db->phy_addr, 18, db->chip_id) & ~0x1000; + phy_reg = dmfe_phy_read(db->ioaddr, + db->phy_addr, 18, db->chip_id) & ~0x1000; - phy_write(db->ioaddr, - db->phy_addr, 18, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, + db->phy_addr, 18, phy_reg, db->chip_id); } /* Phyxcer capability setting */ - phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; if (db->media_mode & DMFE_AUTO) { /* AUTO Mode */ @@ -1724,13 +1724,13 @@ static void dmfe_set_phyxcer(struct dmfe_board_info *db) phy_reg|=db->PHY_reg4; db->media_mode|=DMFE_AUTO; } - phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); /* Restart Auto-Negotiation */ if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) - phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id); if ( !db->chip_type ) - phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); } @@ -1762,7 +1762,7 @@ static void dmfe_process_mode(struct dmfe_board_info *db) /* 10/100M phyxcer force mode need */ if ( !(db->media_mode & 0x18)) { /* Force Mode */ - phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); if ( !(phy_reg & 0x1) ) { /* partner without N-Way capability */ phy_reg = 0x0; @@ -1772,12 +1772,12 @@ static void dmfe_process_mode(struct dmfe_board_info *db) case DMFE_100MHF: phy_reg = 0x2000; break; case DMFE_100MFD: phy_reg = 0x2100; break; } - phy_write(db->ioaddr, - db->phy_addr, 0, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, + db->phy_addr, 0, phy_reg, db->chip_id); if ( db->chip_type &&
(db->chip_id == PCI_DM9102_ID) ) mdelay(20); - phy_write(db->ioaddr, - db->phy_addr, 0, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, + db->phy_addr, 0, phy_reg, db->chip_id); } } } @@ -1787,8 +1787,8 @@ static void dmfe_process_mode(struct dmfe_board_info *db) * Write a word to Phy register */ -static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, - u16 phy_data, u32 chip_id) +static void dmfe_phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, + u16 phy_data, u32 chip_id) { u16 i; @@ -1799,34 +1799,34 @@ static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, /* Send 33 synchronization clock to Phy controller */ for (i = 0; i < 35; i++) - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send start command(01) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_0); - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send write command(01) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_0); - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send Phy address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, - phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); /* Send register address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, - offset & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + offset & i ? PHY_DATA_1 : PHY_DATA_0); /* written transition */ - phy_write_1bit(ioaddr, PHY_DATA_1); - phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); /* Write a word data to PHY controller */ for ( i = 0x8000; i > 0; i >>= 1) - phy_write_1bit(ioaddr, - phy_data & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + phy_data & i ? PHY_DATA_1 : PHY_DATA_0); } } @@ -1835,7 +1835,7 @@ static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset, * Read a word data from phy register */ -static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) +static u16 dmfe_phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) { int i; u16 phy_data; @@ -1848,33 +1848,33 @@ static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) /* Send 33 synchronization clock to Phy controller */ for (i = 0; i < 35; i++) - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send start command(01) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_0); - phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); /* Send read command(10) to Phy */ - phy_write_1bit(ioaddr, PHY_DATA_1); - phy_write_1bit(ioaddr, PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_1); + dmfe_phy_write_1bit(ioaddr, PHY_DATA_0); /* Send Phy address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, - phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); /* Send register address */ for (i = 0x10; i > 0; i = i >> 1) - phy_write_1bit(ioaddr, - offset & i ? PHY_DATA_1 : PHY_DATA_0); + dmfe_phy_write_1bit(ioaddr, + offset & i ?
PHY_DATA_1 : PHY_DATA_0); /* Skip transition state */ - phy_read_1bit(ioaddr); + dmfe_phy_read_1bit(ioaddr); /* read 16bit data */ for (phy_data = 0, i = 0; i < 16; i++) { phy_data <<= 1; - phy_data |= phy_read_1bit(ioaddr); + phy_data |= dmfe_phy_read_1bit(ioaddr); } } @@ -1886,7 +1886,7 @@ static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id) * Write one bit data to Phy Controller */ -static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data) +static void dmfe_phy_write_1bit(void __iomem *ioaddr, u32 phy_data) { dw32(DCR9, phy_data); /* MII Clock Low */ udelay(1); @@ -1901,7 +1901,7 @@ static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data) * Read one bit phy data from PHY controller */ -static u16 phy_read_1bit(void __iomem *ioaddr) +static u16 dmfe_phy_read_1bit(void __iomem *ioaddr) { u16 phy_data; @@ -1995,11 +1995,11 @@ static void dmfe_parse_srom(struct dmfe_board_info * db) /* Check DM9801 or DM9802 present or not */ db->HPNA_present = 0; update_cr6(db->cr6_data | 0x40000, db->ioaddr); - tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); + tmp_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { /* DM9801 or DM9802 present */ db->HPNA_timer = 8; - if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) { + if ( dmfe_phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) { /* DM9801 HomeRun */ db->HPNA_present = 1; dmfe_program_DM9801(db, tmp_reg); @@ -2025,29 +2025,29 @@ static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev) switch(HPNA_rev) { case 0xb900: /* DM9801 E3 */ db->HPNA_command |= 0x1000; - reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id); + reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id); reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000; - reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); + reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); break; case 0xb901: /* DM9801 E4 */ - reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); + reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor; - reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); + reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3; break; case 0xb902: /* DM9801 E5 */ case 0xb903: /* DM9801 E6 */ default: db->HPNA_command |= 0x1000; - reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); + reg25 = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5; - reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); + reg17 = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor; break; } - phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); - phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id); - phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id); } @@ -2060,10 +2060,10 @@ static void dmfe_program_DM9802(struct dmfe_board_info * db) uint phy_reg; if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR; - phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); - phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, 
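The renamed dmfe_phy_write()/dmfe_phy_read() helpers above bit-bang a standard IEEE 802.3 clause-22 MDIO frame: a preamble of 1-bits, start (01), opcode (01 for write, 10 for read), five PHY-address bits, five register bits, a turnaround, then sixteen data bits, all MSB first. A driver-agnostic sketch of the write framing, with mdio_send_bit() as a hypothetical stand-in for dmfe_phy_write_1bit():

	static void mdio_c22_write(void __iomem *ioaddr, u8 phy, u8 reg, u16 val)
	{
		int i;

		for (i = 0; i < 32; i++)	/* preamble: at least 32 one-bits */
			mdio_send_bit(ioaddr, 1);
		mdio_send_bit(ioaddr, 0);	/* start of frame: 01 */
		mdio_send_bit(ioaddr, 1);
		mdio_send_bit(ioaddr, 0);	/* opcode for write: 01 */
		mdio_send_bit(ioaddr, 1);
		for (i = 4; i >= 0; i--)	/* 5-bit PHY address, MSB first */
			mdio_send_bit(ioaddr, (phy >> i) & 1);
		for (i = 4; i >= 0; i--)	/* 5-bit register address */
			mdio_send_bit(ioaddr, (reg >> i) & 1);
		mdio_send_bit(ioaddr, 1);	/* turnaround: 10 */
		mdio_send_bit(ioaddr, 0);
		for (i = 15; i >= 0; i--)	/* 16 data bits, MSB first */
			mdio_send_bit(ioaddr, (val >> i) & 1);
	}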
db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor; - phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id); } @@ -2077,7 +2077,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) uint phy_reg; /* Got remote device status */ - phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60; + phy_reg = dmfe_phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60; switch(phy_reg) { case 0x00: phy_reg = 0x0a00;break; /* LP/LS */ case 0x20: phy_reg = 0x0900;break; /* LP/HS */ @@ -2087,8 +2087,8 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) /* Check remote device status match our setting ot not */ if ( phy_reg != (db->HPNA_command & 0x0f00) ) { - phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, - db->chip_id); + dmfe_phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, + db->chip_id); db->HPNA_timer=8; } else db->HPNA_timer=600; /* Match, every 10 minutes, check */ diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 056b44b93477..d1017509b08a 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -1,5 +1,5 @@ /* - * drivers/net/ethernet/beckhoff/ec_bhf.c + * drivers/net/ethernet/ec_bhf.c * * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl> * @@ -18,9 +18,6 @@ * Those can be found on Bechhoff CX50xx industrial PCs. */ -#if 0 -#define DEBUG -#endif #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -74,6 +71,8 @@ #define DMA_WINDOW_SIZE_MASK 0xfffffffc +#define ETHERCAT_MASTER_ID 0x14 + static struct pci_device_id ids[] = { { PCI_DEVICE(0x15ec, 0x5000), }, { 0, } @@ -131,7 +130,6 @@ struct bhf_dma { struct ec_bhf_priv { struct net_device *net_dev; - struct pci_dev *dev; void __iomem *io; @@ -162,32 +160,6 @@ struct ec_bhf_priv { #define PRIV_TO_DEV(priv) (&(priv)->dev->dev) -#define ETHERCAT_MASTER_ID 0x14 - -static void ec_bhf_print_status(struct ec_bhf_priv *priv) -{ - struct device *dev = PRIV_TO_DEV(priv); - - dev_dbg(dev, "Frame error counter: %d\n", - ioread8(priv->mac_io + MAC_FRAME_ERR_CNT)); - dev_dbg(dev, "RX error counter: %d\n", - ioread8(priv->mac_io + MAC_RX_ERR_CNT)); - dev_dbg(dev, "CRC error counter: %d\n", - ioread8(priv->mac_io + MAC_CRC_ERR_CNT)); - dev_dbg(dev, "TX frame counter: %d\n", - ioread32(priv->mac_io + MAC_TX_FRAME_CNT)); - dev_dbg(dev, "RX frame counter: %d\n", - ioread32(priv->mac_io + MAC_RX_FRAME_CNT)); - dev_dbg(dev, "TX fifo level: %d\n", - ioread8(priv->mac_io + MAC_TX_FIFO_LVL)); - dev_dbg(dev, "Dropped frames: %d\n", - ioread8(priv->mac_io + MAC_DROPPED_FRMS)); - dev_dbg(dev, "Connected with CCAT slot: %d\n", - ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG)); - dev_dbg(dev, "Link status: %d\n", - ioread8(priv->mii_io + MII_LINK_STATUS)); -} - static void ec_bhf_reset(struct ec_bhf_priv *priv) { iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); @@ -210,8 +182,6 @@ static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) u32 addr = (u8 *)desc - priv->tx_buf.buf; iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); - - dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n"); } static int ec_bhf_desc_sent(struct tx_desc *desc) @@ -244,7 +214,6 @@ static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc) static 
void ec_bhf_process_rx(struct ec_bhf_priv *priv) { struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; - struct device *dev = PRIV_TO_DEV(priv); while (ec_bhf_pkt_received(desc)) { int pkt_size = (le16_to_cpu(desc->header.len) & @@ -253,20 +222,16 @@ static void ec_bhf_process_rx(struct ec_bhf_priv *priv) struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); - dev_dbg(dev, "Received packet, size: %d\n", pkt_size); - if (skb) { memcpy(skb_put(skb, pkt_size), data, pkt_size); skb->protocol = eth_type_trans(skb, priv->net_dev); - dev_dbg(dev, "Protocol type: %x\n", skb->protocol); - priv->stat_rx_bytes += pkt_size; netif_rx(skb); } else { - dev_err_ratelimited(dev, - "Couldn't allocate a skb_buff for a packet of size %u\n", - pkt_size); + dev_err_ratelimited(PRIV_TO_DEV(priv), + "Couldn't allocate a skb_buff for a packet of size %u\n", + pkt_size); } desc->header.recv = 0; @@ -276,7 +241,6 @@ static void ec_bhf_process_rx(struct ec_bhf_priv *priv) priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; desc = &priv->rx_descs[priv->rx_dnext]; } - } static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) @@ -299,14 +263,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) unsigned block_count, i; void __iomem *ec_info; - dev_dbg(dev, "Info block:\n"); - dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io)); - dev_dbg(dev, "Revision of function: %x\n", - (unsigned)ioread16(priv->io + INFO_BLOCK_REV)); - block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); - dev_dbg(dev, "Number of function blocks: %x\n", block_count); - for (i = 0; i < block_count; i++) { u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + INFO_BLOCK_TYPE); @@ -317,29 +274,17 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) dev_err(dev, "EtherCAT master with DMA block not found\n"); return -ENODEV; } - dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i); ec_info = priv->io + i * INFO_BLOCK_SIZE; - dev_dbg(dev, "EtherCAT master revision: %d\n", - ioread16(ec_info + INFO_BLOCK_REV)); priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); - dev_dbg(dev, "EtherCAT master tx dma channel: %d\n", - priv->tx_dma_chan); - priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); - dev_dbg(dev, "EtherCAT master rx dma channel: %d\n", - priv->rx_dma_chan); priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); - dev_dbg(dev, - "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n", - priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io); - return 0; } @@ -350,8 +295,6 @@ static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, struct tx_desc *desc; unsigned len; - dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n"); - desc = &priv->tx_descs[priv->tx_dnext]; skb_copy_and_csum_dev(skb, desc->data); @@ -366,15 +309,12 @@ static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { - /* Make sure that update updates to tx_dnext are perceived + /* Make sure that updates to tx_dnext are perceived * by timer routine. 
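The smp_wmb() in the hunk that continues below is the producer half of a classic ring-buffer pairing: the xmit path updates tx_dnext and must publish its writes before the timer routine (the consumer) acts on them. A minimal sketch of the general pattern, with hypothetical fill/reap helpers, assuming the consumer side issues the matching read barrier:

	/* Producer (xmit): make the descriptor visible before the index. */
	fill_descriptor(&priv->tx_descs[next]);
	smp_wmb();			/* order descriptor writes before index */
	priv->tx_dnext = next;

	/* Consumer (timer): read the index, then the descriptor. */
	next = priv->tx_dnext;
	smp_rmb();			/* pairs with the smp_wmb() above */
	reap_descriptor(&priv->tx_descs[next]);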
*/ smp_wmb(); netif_stop_queue(net_dev); - - dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n"); - ec_bhf_print_status(priv); } priv->stat_tx_bytes += len; @@ -397,7 +337,6 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, mask = ioread32(priv->dma_io + offset); mask &= DMA_WINDOW_SIZE_MASK; - dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel); /* We want to allocate a chunk of memory that is: * - aligned to the mask we just read @@ -408,12 +347,10 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, buf->len = min_t(int, ~mask + 1, size); buf->alloc_len = 2 * buf->len; - dev_dbg(dev, "Allocating %d bytes for channel %d", - (int)buf->alloc_len, channel); buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, GFP_KERNEL); if (buf->alloc == NULL) { - dev_info(dev, "Failed to allocate buffer\n"); + dev_err(dev, "Failed to allocate buffer\n"); return -ENOMEM; } @@ -422,8 +359,6 @@ static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, iowrite32(0, priv->dma_io + offset + 4); iowrite32(buf->buf_phys, priv->dma_io + offset); - dev_dbg(dev, "Buffer: %x and read from dev: %x", - (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset)); return 0; } @@ -433,7 +368,7 @@ static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv) int i = 0; priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); - priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf; + priv->tx_descs = (struct tx_desc *)priv->tx_buf.buf; priv->tx_dnext = 0; for (i = 0; i < priv->tx_dcount; i++) @@ -445,7 +380,7 @@ static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv) int i; priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); - priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf; + priv->rx_descs = (struct rx_desc *)priv->rx_buf.buf; priv->rx_dnext = 0; for (i = 0; i < priv->rx_dcount; i++) { @@ -469,8 +404,6 @@ static int ec_bhf_open(struct net_device *net_dev) struct device *dev = PRIV_TO_DEV(priv); int err = 0; - dev_info(dev, "Opening device\n"); - ec_bhf_reset(priv); err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, @@ -481,20 +414,13 @@ static int ec_bhf_open(struct net_device *net_dev) } ec_bhf_setup_rx_descs(priv); - dev_info(dev, "RX buffer allocated, address: %x\n", - (unsigned)priv->rx_buf.buf_phys); - err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, FIFO_SIZE * sizeof(struct tx_desc)); if (err) { dev_err(dev, "Failed to allocate tx buffer\n"); goto error_rx_free; } - dev_dbg(dev, "TX buffer allocated, addres: %x\n", - (unsigned)priv->tx_buf.buf_phys); - iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); - ec_bhf_setup_tx_descs(priv); netif_start_queue(net_dev); @@ -504,10 +430,6 @@ static int ec_bhf_open(struct net_device *net_dev) hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), HRTIMER_MODE_REL); - dev_info(PRIV_TO_DEV(priv), "Device open\n"); - - ec_bhf_print_status(priv); - return 0; error_rx_free: @@ -640,9 +562,6 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); - dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n", - net_dev->dev_addr); - err = register_netdev(net_dev); if (err < 0) goto err_free_net_dev; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 43e08d0bc3d3..9a2d75235e89 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -86,6 +86,8 @@ static inline char *nic_name(struct pci_dev *pdev) #define 
BE_MAX_JUMBO_FRAME_SIZE 9018 #define BE_MIN_MTU 256 +#define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \ + (ETH_HLEN + ETH_FCS_LEN)) #define BE_NUM_VLANS_SUPPORTED 64 #define BE_MAX_EQD 128u @@ -112,7 +114,6 @@ static inline char *nic_name(struct pci_dev *pdev) #define MAX_ROCE_EQS 5 #define MAX_MSIX_VECTORS 32 #define MIN_MSIX_VECTORS 1 -#define BE_TX_BUDGET 256 #define BE_NAPI_WEIGHT 64 #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) @@ -198,7 +199,6 @@ struct be_eq_obj { u8 idx; /* array index */ u8 msix_idx; - u16 tx_budget; u16 spurious_intr; struct napi_struct napi; struct be_adapter *adapter; @@ -248,6 +248,13 @@ struct be_tx_stats { ulong tx_jiffies; u32 tx_stops; u32 tx_drv_drops; /* pkts dropped by driver */ + /* the error counters are described in be_ethtool.c */ + u32 tx_hdr_parse_err; + u32 tx_dma_err; + u32 tx_tso_err; + u32 tx_spoof_check_err; + u32 tx_qinq_err; + u32 tx_internal_parity_err; struct u64_stats_sync sync; struct u64_stats_sync sync_compl; }; @@ -316,6 +323,7 @@ struct be_rx_obj { struct be_drv_stats { u32 be_on_die_temperature; u32 eth_red_drops; + u32 dma_map_errors; u32 rx_drops_no_pbuf; u32 rx_drops_no_txpb; u32 rx_drops_no_erx_descr; @@ -399,9 +407,9 @@ struct phy_info { u16 auto_speeds_supported; u16 fixed_speeds_supported; int link_speed; - u32 dac_cable_len; u32 advertising; u32 supported; + u8 cable_type; }; struct be_resources { @@ -613,6 +621,10 @@ extern const struct ethtool_ops be_ethtool_ops; for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\ i += adapter->num_evt_qs, rxo += adapter->num_evt_qs) +#define for_all_tx_queues_on_eq(adapter, eqo, txo, i) \ + for (i = eqo->idx, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;\ + i += adapter->num_evt_qs, txo += adapter->num_evt_qs) + #define is_mcc_eqo(eqo) (eqo->idx == 0) #define mcc_eqo(adapter) (&adapter->eq_obj[0]) @@ -661,6 +673,18 @@ static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset) amap_mask(sizeof(((_struct *)0)->field)), \ AMAP_BIT_OFFSET(_struct, field)) +#define GET_RX_COMPL_V0_BITS(field, ptr) \ + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, field, ptr) + +#define GET_RX_COMPL_V1_BITS(field, ptr) \ + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, field, ptr) + +#define GET_TX_COMPL_BITS(field, ptr) \ + AMAP_GET_BITS(struct amap_eth_tx_compl, field, ptr) + +#define SET_TX_WRB_HDR_BITS(field, ptr, val) \ + AMAP_SET_BITS(struct amap_eth_hdr_wrb, field, ptr, val) + #define be_dws_cpu_to_le(wrb, len) swap_dws(wrb, len) #define be_dws_le_to_cpu(wrb, len) swap_dws(wrb, len) static inline void swap_dws(void *wrb, int len) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 4370ec1952ac..fead5c65a4f0 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -209,7 +209,6 @@ static int be_mcc_compl_process(struct be_adapter *adapter, if (base_status != MCC_STATUS_SUCCESS && !be_skip_err_log(opcode, base_status, addl_status)) { - if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { dev_warn(&adapter->pdev->dev, "VF is not privileged to issue opcode %d-%d\n", @@ -309,8 +308,6 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter, be_async_grp5_pvid_state_process(adapter, compl); break; default: - dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n", - event_type); break; } } @@ -319,7 +316,7 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter, struct 
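With ETH_HLEN = 14 and ETH_FCS_LEN = 4, the new BE_MAX_MTU macro above works out as follows; be_change_mtu() in be_main.c, further below, now validates against it:

	/* BE_MAX_MTU = BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)
	 *            = 9018 - (14 + 4)
	 *            = 9000 bytes
	 */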
be_mcc_compl *cmp) { u8 event_type = 0; - struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp; + struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp; event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & ASYNC_EVENT_TYPE_MASK; @@ -595,6 +592,7 @@ static int lancer_wait_ready(struct be_adapter *adapter) static bool lancer_provisioning_error(struct be_adapter *adapter) { u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_ERR_MASK) { sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET); @@ -677,7 +675,6 @@ int be_fw_wait_ready(struct be_adapter *adapter) return -1; } - static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) { return &wrb->payload.sgl[0]; @@ -924,6 +921,7 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo) status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); + eqo->q.id = le16_to_cpu(resp->eq_id); eqo->msix_idx = (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx; @@ -958,7 +956,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, if (permanent) { req->permanent = 1; } else { - req->if_id = cpu_to_le16((u16) if_handle); + req->if_id = cpu_to_le16((u16)if_handle); req->pmac_id = cpu_to_le32(pmac_id); req->permanent = 0; } @@ -966,6 +964,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); + memcpy(mac_addr, resp->mac.addr, ETH_ALEN); } @@ -1002,6 +1001,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); + *pmac_id = le32_to_cpu(resp->pmac_id); } @@ -1034,7 +1034,8 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL); + OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), + wrb, NULL); req->hdr.domain = dom; req->if_id = cpu_to_le32(if_id); @@ -1106,6 +1107,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); + cq->id = le16_to_cpu(resp->cq_id); cq->created = true; } @@ -1118,6 +1120,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, static u32 be_encoded_q_len(int q_len) { u32 len_encoded = fls(q_len); /* log2(len) + 1 */ + if (len_encoded == 16) len_encoded = 0; return len_encoded; @@ -1173,6 +1176,7 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter, status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); + mccq->id = le16_to_cpu(resp->id); mccq->created = true; } @@ -1216,6 +1220,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter, status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); + mccq->id = le16_to_cpu(resp->id); mccq->created = true; } @@ -1274,6 +1279,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) status = be_cmd_notify_wait(adapter, &wrb); if (!status) { struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb); + txq->id = 
le16_to_cpu(resp->cid); if (ver == 2) txo->db_offset = le32_to_cpu(resp->db_offset); @@ -1318,6 +1324,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); + rxq->id = le16_to_cpu(resp->id); rxq->created = true; *rss_id = resp->rss_id; @@ -1431,6 +1438,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, status = be_cmd_notify_wait(adapter, &wrb); if (!status) { struct be_cmd_resp_if_create *resp = embedded_payload(&wrb); + *if_handle = le32_to_cpu(resp->interface_id); /* Hack to retrieve VF's pmac-id on BE3 */ @@ -1514,7 +1522,6 @@ err: int lancer_cmd_get_pport_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) { - struct be_mcc_wrb *wrb; struct lancer_cmd_req_pport_stats *req; int status = 0; @@ -1605,6 +1612,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_link_status *resp = embedded_payload(wrb); + if (link_speed) { *link_speed = resp->link_speed ? le16_to_cpu(resp->link_speed) * 10 : @@ -1672,6 +1680,7 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); + if (log_size && resp->log_size) *log_size = le32_to_cpu(resp->log_size) - sizeof(u32); @@ -1681,17 +1690,17 @@ err: return status; } -void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) +int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) { struct be_dma_mem get_fat_cmd; struct be_mcc_wrb *wrb; struct be_cmd_req_get_fat *req; u32 offset = 0, total_size, buf_size, log_offset = sizeof(u32), payload_len; - int status; + int status = 0; if (buf_len == 0) - return; + return -EIO; total_size = buf_len; @@ -1700,10 +1709,9 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) get_fat_cmd.size, &get_fat_cmd.dma); if (!get_fat_cmd.va) { - status = -ENOMEM; dev_err(&adapter->pdev->dev, - "Memory allocation failure while retrieving FAT data\n"); - return; + "Memory allocation failure while reading FAT data\n"); + return -ENOMEM; } spin_lock_bh(&adapter->mcc_lock); @@ -1732,6 +1740,7 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; + memcpy(buf + offset, resp->data_buffer, le32_to_cpu(resp->read_log_length)); @@ -1746,6 +1755,7 @@ err: pci_free_consistent(adapter->pdev, get_fat_cmd.size, get_fat_cmd.va, get_fat_cmd.dma); spin_unlock_bh(&adapter->mcc_lock); + return status; } /* Uses synchronous mcc */ @@ -1771,8 +1781,11 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); - strcpy(adapter->fw_ver, resp->firmware_version_string); - strcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string); + + strlcpy(adapter->fw_ver, resp->firmware_version_string, + sizeof(adapter->fw_ver)); + strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string, + sizeof(adapter->fw_on_flash)); } err: spin_unlock_bh(&adapter->mcc_lock); @@ -1782,8 +1795,8 @@ err: /* set the EQ delay interval of an EQ to specified value * Uses async mcc */ -int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, - int num) +static int __be_cmd_modify_eqd(struct 
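The strcpy() to strlcpy() conversions above are overflow hardening: strlcpy() never writes past the destination buffer and always NUL-terminates, so an over-long firmware version string coming back from the adapter is truncated instead of corrupting adjacent fields. The shape of the fix:

	/* Bounded copy: truncates rather than overflows if the firmware
	 * string exceeds sizeof(adapter->fw_ver).
	 */
	strlcpy(adapter->fw_ver, resp->firmware_version_string,
		sizeof(adapter->fw_ver));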
be_adapter *adapter, + struct be_set_eqd *set_eqd, int num) { struct be_mcc_wrb *wrb; struct be_cmd_req_modify_eq_delay *req; @@ -1816,6 +1829,25 @@ err: return status; } +int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd, + int num) +{ + int num_eqs, i = 0; + + if (lancer_chip(adapter) && num > 8) { + while (num) { + num_eqs = min(num, 8); + __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); + i += num_eqs; + num -= num_eqs; + } + } else { + __be_cmd_modify_eqd(adapter, set_eqd, num); + } + + return 0; +} + /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, u32 num) @@ -1879,8 +1911,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_MCAST_PROMISCUOUS); } else if (flags & IFF_ALLMULTI) { - req->if_flags_mask = req->if_flags = - cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); + req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); + req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); } else if (flags & BE_FLAGS_VLAN_PROMISC) { req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS); @@ -1891,8 +1923,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) struct netdev_hw_addr *ha; int i = 0; - req->if_flags_mask = req->if_flags = - cpu_to_le32(BE_IF_FLAGS_MULTICAST); + req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_MULTICAST); + req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST); /* Reset mcast promisc mode if already set by setting mask * and not setting flags field @@ -1947,6 +1979,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); + req->hdr.version = 1; req->tx_flow_control = cpu_to_le16((u16)tx_fc); req->rx_flow_control = cpu_to_le16((u16)rx_fc); @@ -1954,6 +1987,10 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) err: spin_unlock_bh(&adapter->mcc_lock); + + if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED) + return -EOPNOTSUPP; + return status; } @@ -1985,6 +2022,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) if (!status) { struct be_cmd_resp_get_flow_control *resp = embedded_payload(wrb); + *tx_fc = le16_to_cpu(resp->tx_flow_control); *rx_fc = le16_to_cpu(resp->rx_flow_control); } @@ -2014,10 +2052,14 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter) status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); + adapter->port_num = le32_to_cpu(resp->phys_port); adapter->function_mode = le32_to_cpu(resp->function_mode); adapter->function_caps = le32_to_cpu(resp->function_caps); adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF; + dev_info(&adapter->pdev->dev, + "FW config: function_mode=0x%x, function_caps=0x%x\n", + adapter->function_mode, adapter->function_caps); } mutex_unlock(&adapter->mbox_lock); @@ -2159,6 +2201,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) if (!status) { struct be_cmd_resp_get_beacon_state *resp = embedded_payload(wrb); + *state = resp->beacon_state; } @@ -2167,6 +2210,53 @@ err: return status; } +/* Uses sync mcc */ +int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, + u8 page_num, u8 *data) +{ + struct be_dma_mem cmd; + struct be_mcc_wrb *wrb; + struct be_cmd_req_port_type *req; + int status; + + if (page_num > TR_PAGE_A2) + return -EINVAL; + + cmd.size = sizeof(struct 
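The Lancer path in the new be_cmd_modify_eqd() wrapper above caps each MODIFY_EQ_DELAY command at eight EQs and walks the array in chunks; note that it discards the per-batch status from __be_cmd_modify_eqd() and always returns 0. The chunking idiom in isolation, with issue_batch() as a stand-in for the real command:

	int i = 0;

	while (num) {
		int batch = min(num, 8);	/* firmware limit per command */

		issue_batch(&set_eqd[i], batch);
		i += batch;
		num -= batch;
	}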
be_cmd_resp_port_type); + cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + if (!cmd.va) { + dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); + return -ENOMEM; + } + memset(cmd.va, 0, cmd.size); + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = cmd.va; + + be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_READ_TRANSRECV_DATA, + cmd.size, wrb, &cmd); + + req->port = cpu_to_le32(adapter->hba_port_num); + req->page_num = cpu_to_le32(page_num); + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_port_type *resp = cmd.va; + + memcpy(data, resp->page_data, PAGE_DATA_LEN); + } +err: + spin_unlock_bh(&adapter->mcc_lock); + pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + return status; +} + int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_written, @@ -2207,7 +2297,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, be_dws_cpu_to_le(ctxt, sizeof(req->context)); req->write_offset = cpu_to_le32(data_offset); - strcpy(req->object_name, obj_name); + strlcpy(req->object_name, obj_name, sizeof(req->object_name)); req->descriptor_count = cpu_to_le32(1); req->buf_len = cpu_to_le32(data_size); req->addr_low = cpu_to_le32((cmd->dma + @@ -2240,6 +2330,31 @@ err_unlock: return status; } +int be_cmd_query_cable_type(struct be_adapter *adapter) +{ + u8 page_data[PAGE_DATA_LEN]; + int status; + + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + page_data); + if (!status) { + switch (adapter->phy.interface_type) { + case PHY_TYPE_QSFP: + adapter->phy.cable_type = + page_data[QSFP_PLUS_CABLE_TYPE_OFFSET]; + break; + case PHY_TYPE_SFP_PLUS_10GB: + adapter->phy.cable_type = + page_data[SFP_PLUS_CABLE_TYPE_OFFSET]; + break; + default: + adapter->phy.cable_type = 0; + break; + } + } + return status; +} + int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) { struct lancer_cmd_req_delete_object *req; @@ -2260,7 +2375,7 @@ int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name) OPCODE_COMMON_DELETE_OBJECT, sizeof(*req), wrb, NULL); - strcpy(req->object_name, obj_name); + strlcpy(req->object_name, obj_name, sizeof(req->object_name)); status = be_mcc_notify_wait(adapter); err: @@ -2357,7 +2472,7 @@ err_unlock: } int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, - u16 optype, int offset) + u16 optype, int offset) { struct be_mcc_wrb *wrb; struct be_cmd_read_flash_crc *req; @@ -2528,9 +2643,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, if (!status) { struct be_cmd_resp_ddrdma_test *resp; + resp = cmd->va; if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || - resp->snd_err) { + resp->snd_err) { status = -1; } } @@ -2603,6 +2719,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) if (!status) { struct be_phy_info *resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr); + adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); adapter->phy.interface_type = le16_to_cpu(resp_phy_info->interface_type); @@ -2732,6 +2849,7 @@ int be_cmd_req_native_mode(struct be_adapter *adapter) status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); + adapter->be3_native = le32_to_cpu(resp->cap_flags) & CAPABILITY_BE3_NATIVE_ERX_API; if (!adapter->be3_native) @@ 
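The cable type that be_cmd_query_cable_type() above caches from page A0 feeds ethtool's port report: copper assemblies show up as PORT_DA (direct attach), everything else as PORT_FIBRE. A condensed view of the mapping performed by be_get_port_type() in the be_ethtool.c hunks further below, using the SFF constants added to be_cmds.h:

	be_cmd_query_cable_type(adapter);
	if (adapter->phy.interface_type == PHY_TYPE_QSFP)
		port = (adapter->phy.cable_type & QSFP_PLUS_CR4_CABLE) ?
		       PORT_DA : PORT_FIBRE;
	else if (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB)
		port = (adapter->phy.cable_type & SFP_PLUS_COPPER_CABLE) ?
		       PORT_DA : PORT_FIBRE;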
-2771,6 +2889,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, if (!status) { struct be_cmd_resp_get_fn_privileges *resp = embedded_payload(wrb); + *privilege = le32_to_cpu(resp->privilege_mask); /* In UMC mode FW does not return right privileges. @@ -2918,7 +3037,6 @@ out: int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac, u32 if_handle, bool active, u32 domain) { - if (!active) be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, if_handle, domain); @@ -3102,6 +3220,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, if (!status) { struct be_cmd_resp_get_hsw_config *resp = embedded_payload(wrb); + be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, pvid, &resp->context); @@ -3161,7 +3280,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; - resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; + + resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; adapter->wol_cap = resp->wol_settings; if (adapter->wol_cap & BE_WOL_CAP) @@ -3197,6 +3317,7 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); + for (j = 0; j < num_modes; j++) { if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) cfgs->module[i].trace_lvl[j].dbg_lvl = @@ -3233,6 +3354,7 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) if (!status) { cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); + for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) level = cfgs->module[0].trace_lvl[j].dbg_lvl; @@ -3329,6 +3451,7 @@ int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name) status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); + *port_name = resp->port_name[adapter->hba_port_num]; } else { *port_name = adapter->hba_port_num + '0'; @@ -3952,6 +4075,7 @@ int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) if (!status) { struct be_cmd_resp_get_active_profile *resp = embedded_payload(wrb); + *profile_id = le16_to_cpu(resp->active_profile_id); } @@ -4004,7 +4128,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, { struct be_adapter *adapter = netdev_priv(netdev_handle); struct be_mcc_wrb *wrb; - struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload; + struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload; struct be_cmd_req_hdr *req; struct be_cmd_resp_hdr *resp; int status; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5284b825bba2..eb5085d6794f 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -57,7 +57,8 @@ enum mcc_base_status { MCC_STATUS_ILLEGAL_FIELD = 3, MCC_STATUS_INSUFFICIENT_BUFFER = 4, MCC_STATUS_UNAUTHORIZED_REQUEST = 5, - MCC_STATUS_NOT_SUPPORTED = 66 + MCC_STATUS_NOT_SUPPORTED = 66, + MCC_STATUS_FEATURE_NOT_SUPPORTED = 68 }; /* Additional status */ @@ -1004,8 +1005,8 @@ struct be_cmd_resp_link_status { /* Identifies the type of port attached to NIC */ struct be_cmd_req_port_type { struct be_cmd_req_hdr hdr; - u32 
page_num; - u32 port; + __le32 page_num; + __le32 port; }; enum { @@ -1013,28 +1014,23 @@ enum { TR_PAGE_A2 = 0xa2 }; +/* From SFF-8436 QSFP+ spec */ +#define QSFP_PLUS_CABLE_TYPE_OFFSET 0x83 +#define QSFP_PLUS_CR4_CABLE 0x8 +#define QSFP_PLUS_SR4_CABLE 0x4 +#define QSFP_PLUS_LR4_CABLE 0x2 + +/* From SFF-8472 spec */ +#define SFP_PLUS_SFF_8472_COMP 0x5E +#define SFP_PLUS_CABLE_TYPE_OFFSET 0x8 +#define SFP_PLUS_COPPER_CABLE 0x4 + +#define PAGE_DATA_LEN 256 struct be_cmd_resp_port_type { struct be_cmd_resp_hdr hdr; u32 page_num; u32 port; - struct data { - u8 identifier; - u8 identifier_ext; - u8 connector; - u8 transceiver[8]; - u8 rsvd0[3]; - u8 length_km; - u8 length_hm; - u8 length_om1; - u8 length_om2; - u8 length_cu; - u8 length_cu_m; - u8 vendor_name[16]; - u8 rsvd; - u8 vendor_oui[3]; - u8 vendor_pn[16]; - u8 vendor_rev[4]; - } data; + u8 page_data[PAGE_DATA_LEN]; }; /******************** Get FW Version *******************/ @@ -1367,6 +1363,9 @@ enum { PHY_TYPE_BASET_1GB, PHY_TYPE_BASEX_1GB, PHY_TYPE_SGMII, + PHY_TYPE_QSFP, + PHY_TYPE_KR4_40GB, + PHY_TYPE_KR2_20GB, PHY_TYPE_DISABLED = 255 }; @@ -1375,6 +1374,8 @@ enum { #define BE_SUPPORTED_SPEED_100MBPS 2 #define BE_SUPPORTED_SPEED_1GBPS 4 #define BE_SUPPORTED_SPEED_10GBPS 8 +#define BE_SUPPORTED_SPEED_20GBPS 0x10 +#define BE_SUPPORTED_SPEED_40GBPS 0x20 #define BE_AN_EN 0x2 #define BE_PAUSE_SYM_EN 0x80 @@ -2066,6 +2067,9 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon, u8 status, u8 state); int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state); +int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, + u8 page_num, u8 *data); +int be_cmd_query_cable_type(struct be_adapter *adapter); int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 flash_oper, u32 flash_opcode, u32 buf_size); int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, @@ -2101,7 +2105,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter); int be_cmd_get_cntl_attributes(struct be_adapter *adapter); int be_cmd_req_native_mode(struct be_adapter *adapter); int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); -void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); +int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, u32 domain); int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 0cd3311409a8..e42a791c1835 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -78,6 +78,11 @@ static const struct be_ethtool_stat et_stats[] = { * fifo must never overflow. */ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)}, + /* Received packets dropped when the RX block runs out of space in + * one of its input FIFOs. This could happen due a long burst of + * minimum-sized (64b) frames in the receive path. + * This counter may also be erroneously incremented rarely. 
+ */ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)}, {DRVSTAT_INFO(rx_ip_checksum_errs)}, {DRVSTAT_INFO(rx_tcp_checksum_errs)}, @@ -114,6 +119,8 @@ static const struct be_ethtool_stat et_stats[] = { * is more than 9018 bytes */ {DRVSTAT_INFO(rx_drops_mtu)}, + /* Number of dma mapping errors */ + {DRVSTAT_INFO(dma_map_errors)}, /* Number of packets dropped due to random early drop function */ {DRVSTAT_INFO(eth_red_drops)}, {DRVSTAT_INFO(be_on_die_temperature)}, @@ -123,6 +130,7 @@ static const struct be_ethtool_stat et_stats[] = { {DRVSTAT_INFO(roce_drops_payload_len)}, {DRVSTAT_INFO(roce_drops_crc)} }; + #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) /* Stats related to multi RX queues: get_stats routine assumes bytes, pkts @@ -145,6 +153,7 @@ static const struct be_ethtool_stat et_rx_stats[] = { */ {DRVSTAT_RX_INFO(rx_drops_no_frags)} }; + #define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats)) /* Stats related to multi TX queues: get_stats routine assumes compl is the @@ -152,6 +161,34 @@ static const struct be_ethtool_stat et_rx_stats[] = { */ static const struct be_ethtool_stat et_tx_stats[] = { {DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */ + /* This counter is incremented when the HW encounters an error while + * parsing the packet header of an outgoing TX request. This counter is + * applicable only for BE2, BE3 and Skyhawk based adapters. + */ + {DRVSTAT_TX_INFO(tx_hdr_parse_err)}, + /* This counter is incremented when an error occurs in the DMA + * operation associated with the TX request from the host to the device. + */ + {DRVSTAT_TX_INFO(tx_dma_err)}, + /* This counter is incremented when MAC or VLAN spoof checking is + * enabled on the interface and the TX request fails the spoof check + * in HW. + */ + {DRVSTAT_TX_INFO(tx_spoof_check_err)}, + /* This counter is incremented when the HW encounters an error while + * performing TSO offload. This counter is applicable only for Lancer + * adapters. + */ + {DRVSTAT_TX_INFO(tx_tso_err)}, + /* This counter is incremented when the HW detects Q-in-Q style VLAN + * tagging in a packet and such tagging is not expected on the outgoing + * interface. This counter is applicable only for Lancer adapters. + */ + {DRVSTAT_TX_INFO(tx_qinq_err)}, + /* This counter is incremented when the HW detects parity errors in the + * packet data. This counter is applicable only for Lancer adapters. 
+ */ + {DRVSTAT_TX_INFO(tx_internal_parity_err)}, {DRVSTAT_TX_INFO(tx_bytes)}, {DRVSTAT_TX_INFO(tx_pkts)}, /* Number of skbs queued for trasmission by the driver */ @@ -165,6 +202,7 @@ static const struct be_ethtool_stat et_tx_stats[] = { /* Pkts dropped in the driver's transmit path */ {DRVSTAT_TX_INFO(tx_drv_drops)} }; + #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) static const char et_self_tests[][ETH_GSTRING_LEN] = { @@ -239,7 +277,7 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, while ((total_read_len < buf_len) && !eof) { chunk_size = min_t(u32, (buf_len - total_read_len), - LANCER_READ_FILE_CHUNK); + LANCER_READ_FILE_CHUNK); chunk_size = ALIGN(chunk_size, 4); status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, total_read_len, file_name, @@ -298,7 +336,6 @@ static int be_get_coalesce(struct net_device *netdev, struct be_adapter *adapter = netdev_priv(netdev); struct be_aic_obj *aic = &adapter->aic_obj[0]; - et->rx_coalesce_usecs = aic->prev_eqd; et->rx_coalesce_usecs_high = aic->max_eqd; et->rx_coalesce_usecs_low = aic->min_eqd; @@ -440,18 +477,27 @@ static int be_get_sset_count(struct net_device *netdev, int stringset) } } -static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len) +static u32 be_get_port_type(struct be_adapter *adapter) { u32 port; - switch (phy_type) { + switch (adapter->phy.interface_type) { case PHY_TYPE_BASET_1GB: case PHY_TYPE_BASEX_1GB: case PHY_TYPE_SGMII: port = PORT_TP; break; case PHY_TYPE_SFP_PLUS_10GB: - port = dac_cable_len ? PORT_DA : PORT_FIBRE; + if (adapter->phy.cable_type & SFP_PLUS_COPPER_CABLE) + port = PORT_DA; + else + port = PORT_FIBRE; + break; + case PHY_TYPE_QSFP: + if (adapter->phy.cable_type & QSFP_PLUS_CR4_CABLE) + port = PORT_DA; + else + port = PORT_FIBRE; break; case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: @@ -467,11 +513,11 @@ static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len) return port; } -static u32 convert_to_et_setting(u32 if_type, u32 if_speeds) +static u32 convert_to_et_setting(struct be_adapter *adapter, u32 if_speeds) { u32 val = 0; - switch (if_type) { + switch (adapter->phy.interface_type) { case PHY_TYPE_BASET_1GB: case PHY_TYPE_BASEX_1GB: case PHY_TYPE_SGMII: @@ -490,10 +536,38 @@ static u32 convert_to_et_setting(u32 if_type, u32 if_speeds) if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) val |= SUPPORTED_10000baseKX4_Full; break; + case PHY_TYPE_KR2_20GB: + val |= SUPPORTED_Backplane; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseKR_Full; + if (if_speeds & BE_SUPPORTED_SPEED_20GBPS) + val |= SUPPORTED_20000baseKR2_Full; + break; case PHY_TYPE_KR_10GB: val |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; break; + case PHY_TYPE_KR4_40GB: + val |= SUPPORTED_Backplane; + if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) + val |= SUPPORTED_10000baseKR_Full; + if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) + val |= SUPPORTED_40000baseKR4_Full; + break; + case PHY_TYPE_QSFP: + if (if_speeds & BE_SUPPORTED_SPEED_40GBPS) { + switch (adapter->phy.cable_type) { + case QSFP_PLUS_CR4_CABLE: + val |= SUPPORTED_40000baseCR4_Full; + break; + case QSFP_PLUS_LR4_CABLE: + val |= SUPPORTED_40000baseLR4_Full; + break; + default: + val |= SUPPORTED_40000baseSR4_Full; + break; + } + } case PHY_TYPE_SFP_PLUS_10GB: case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: @@ -534,8 +608,6 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) int status; u32 auto_speeds; u32 fixed_speeds; - u32 dac_cable_len; - u16 interface_type; if 
(adapter->phy.link_speed < 0) { status = be_cmd_link_status_query(adapter, &link_speed, @@ -546,21 +618,19 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) status = be_cmd_get_phy_info(adapter); if (!status) { - interface_type = adapter->phy.interface_type; auto_speeds = adapter->phy.auto_speeds_supported; fixed_speeds = adapter->phy.fixed_speeds_supported; - dac_cable_len = adapter->phy.dac_cable_len; + + be_cmd_query_cable_type(adapter); ecmd->supported = - convert_to_et_setting(interface_type, + convert_to_et_setting(adapter, auto_speeds | fixed_speeds); ecmd->advertising = - convert_to_et_setting(interface_type, - auto_speeds); + convert_to_et_setting(adapter, auto_speeds); - ecmd->port = be_get_port_type(interface_type, - dac_cable_len); + ecmd->port = be_get_port_type(adapter); if (adapter->phy.auto_speeds_supported) { ecmd->supported |= SUPPORTED_Autoneg; @@ -614,8 +684,10 @@ static void be_get_ringparam(struct net_device *netdev, { struct be_adapter *adapter = netdev_priv(netdev); - ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len; - ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len; + ring->rx_max_pending = adapter->rx_obj[0].q.len; + ring->rx_pending = adapter->rx_obj[0].q.len; + ring->tx_max_pending = adapter->tx_obj[0].q.len; + ring->tx_pending = adapter->tx_obj[0].q.len; } static void @@ -641,7 +713,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) - dev_warn(&adapter->pdev->dev, "Pause param set failed.\n"); + dev_warn(&adapter->pdev->dev, "Pause param set failed\n"); return be_cmd_status(status); } @@ -907,8 +979,6 @@ static void be_set_msg_level(struct net_device *netdev, u32 level) FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL); adapter->msg_enable = level; - - return; } static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type) @@ -1127,6 +1197,7 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir, if (indir) { struct be_rx_obj *rxo; + for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) { j = indir[i]; rxo = &adapter->rx_obj[j]; @@ -1142,8 +1213,8 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir, hkey = adapter->rss_info.rss_hkey; rc = be_cmd_rss_config(adapter, rsstable, - adapter->rss_info.rss_flags, - RSS_INDIR_TABLE_LEN, hkey); + adapter->rss_info.rss_flags, + RSS_INDIR_TABLE_LEN, hkey); if (rc) { adapter->rss_info.rss_flags = RSS_ENABLE_NONE; return -EIO; @@ -1154,6 +1225,58 @@ static int be_set_rxfh(struct net_device *netdev, const u32 *indir, return 0; } +static int be_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct be_adapter *adapter = netdev_priv(netdev); + u8 page_data[PAGE_DATA_LEN]; + int status; + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + page_data); + if (!status) { + if (!page_data[SFP_PLUS_SFF_8472_COMP]) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = PAGE_DATA_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = 2 * PAGE_DATA_LEN; + } + } + return be_cmd_status(status); +} + +static int be_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct be_adapter *adapter = netdev_priv(netdev); + int status; + + if (!check_privilege(adapter, MAX_PRIVILEGES)) + return -EOPNOTSUPP; + + status = 
be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, + data); + if (status) + goto err; + + if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) { + status = be_cmd_read_port_transceiver_data(adapter, + TR_PAGE_A2, + data + + PAGE_DATA_LEN); + if (status) + goto err; + } + if (eeprom->offset) + memcpy(data, data + eeprom->offset, eeprom->len); +err: + return be_cmd_status(status); +} + const struct ethtool_ops be_ethtool_ops = { .get_settings = be_get_settings, .get_drvinfo = be_get_drvinfo, @@ -1185,5 +1308,7 @@ const struct ethtool_ops be_ethtool_ops = { .get_rxfh = be_get_rxfh, .set_rxfh = be_set_rxfh, .get_channels = be_get_channels, - .set_channels = be_set_channels + .set_channels = be_set_channels, + .get_module_info = be_get_module_info, + .get_module_eeprom = be_get_module_eeprom }; diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h index 8840c64aaeca..295ee0835ba0 100644 --- a/drivers/net/ethernet/emulex/benet/be_hw.h +++ b/drivers/net/ethernet/emulex/benet/be_hw.h @@ -315,6 +315,18 @@ struct be_eth_hdr_wrb { u32 dw[4]; }; +/********* Tx Compl Status Encoding *********/ +#define BE_TX_COMP_HDR_PARSE_ERR 0x2 +#define BE_TX_COMP_NDMA_ERR 0x3 +#define BE_TX_COMP_ACL_ERR 0x5 + +#define LANCER_TX_COMP_LSO_ERR 0x1 +#define LANCER_TX_COMP_HSW_DROP_MAC_ERR 0x3 +#define LANCER_TX_COMP_HSW_DROP_VLAN_ERR 0x5 +#define LANCER_TX_COMP_QINQ_ERR 0x7 +#define LANCER_TX_COMP_PARITY_ERR 0xb +#define LANCER_TX_COMP_DMA_ERR 0xd + /* TX Compl Queue Descriptor */ /* Pseudo amap definition for eth_tx_compl in which each bit of the diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 93ff8ef39352..9a18e7930b31 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -86,6 +86,7 @@ static const char * const ue_status_low_desc[] = { "JTAG ", "MPU_INTPEND " }; + /* UE Status High CSR */ static const char * const ue_status_hi_desc[] = { "LPCMEMHOST", @@ -122,10 +123,10 @@ static const char * const ue_status_hi_desc[] = { "Unknown" }; - static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { struct be_dma_mem *mem = &q->dma_mem; + if (mem->va) { dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, mem->dma); @@ -187,6 +188,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable) static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) { u32 val = 0; + val |= qid & DB_RQ_RING_ID_MASK; val |= posted << DB_RQ_NUM_POSTED_SHIFT; @@ -198,6 +200,7 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo, u16 posted) { u32 val = 0; + val |= txo->q.id & DB_TXULP_RING_ID_MASK; val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT; @@ -209,6 +212,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid, bool arm, bool clear_int, u16 num_popped) { u32 val = 0; + val |= qid & DB_EQ_RING_ID_MASK; val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT); @@ -227,6 +231,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid, void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped) { u32 val = 0; + val |= qid & DB_CQ_RING_ID_MASK; val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << DB_CQ_RING_ID_EXT_MASK_SHIFT); @@ -488,7 +493,6 @@ static void populate_be_v2_stats(struct be_adapter *adapter) static void populate_lancer_stats(struct be_adapter *adapter) { - struct be_drv_stats *drvs = &adapter->drv_stats; struct 
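The new be_get_module_eeprom() above follows the usual SFF-8472 layout: page A0 is always PAGE_DATA_LEN (256) bytes, and a second 256-byte A2 diagnostics page exists only when byte 0x5E (SFP_PLUS_SFF_8472_COMP) is non-zero, which is also how be_get_module_info() chooses between ETH_MODULE_SFF_8079 (256 bytes) and ETH_MODULE_SFF_8472 (512 bytes). One caveat in the offset handling: when eeprom->offset is non-zero and smaller than eeprom->len, the source and destination of the final copy overlap, and memcpy() is undefined for overlapping regions, so a defensive variant would be:

	/* Shift the requested window to the start of the caller's buffer;
	 * memmove() is safe for the overlapping case, memcpy() is not.
	 */
	if (eeprom->offset)
		memmove(data, data + eeprom->offset, eeprom->len);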
lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter); @@ -588,6 +592,7 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, for_all_rx_queues(adapter, rxo, i) { const struct be_rx_stats *rx_stats = rx_stats(rxo); + do { start = u64_stats_fetch_begin_irq(&rx_stats->sync); pkts = rx_stats(rxo)->rx_pkts; @@ -602,6 +607,7 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, for_all_tx_queues(adapter, txo, i) { const struct be_tx_stats *tx_stats = tx_stats(txo); + do { start = u64_stats_fetch_begin_irq(&tx_stats->sync); pkts = tx_stats(txo)->tx_pkts; @@ -738,38 +744,37 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, memset(hdr, 0, sizeof(*hdr)); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1); + SET_TX_WRB_HDR_BITS(crc, hdr, 1); if (skb_is_gso(skb)) { - AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, - hdr, skb_shinfo(skb)->gso_size); + SET_TX_WRB_HDR_BITS(lso, hdr, 1); + SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size); if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) - AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); + SET_TX_WRB_HDR_BITS(lso6, hdr, 1); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->encapsulation) { - AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1); + SET_TX_WRB_HDR_BITS(ipcs, hdr, 1); proto = skb_inner_ip_proto(skb); } else { proto = skb_ip_proto(skb); } if (proto == IPPROTO_TCP) - AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); + SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1); else if (proto == IPPROTO_UDP) - AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1); + SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); } if (vlan_tx_tag_present(skb)) { - AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1); + SET_TX_WRB_HDR_BITS(vlan, hdr, 1); vlan_tag = be_get_tx_vlan_tag(adapter, skb); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag); + SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); } /* To skip HW VLAN tagging: evt = 1, compl = 0 */ - AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt); - AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); + SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan); + SET_TX_WRB_HDR_BITS(event, hdr, 1); + SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); + SET_TX_WRB_HDR_BITS(len, hdr, len); } static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, @@ -808,6 +813,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, if (skb->len > skb->data_len) { int len = skb_headlen(skb); + busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; @@ -821,6 +827,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + busaddr = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) @@ -850,6 +857,7 @@ dma_err: unmap_tx_frag(dev, wrb, map_single); map_single = false; copied -= wrb->frag_len; + adapter->drv_stats.dma_map_errors++; queue_head_inc(txq); } return 0; @@ -910,7 +918,7 @@ static bool be_ipv6_exthdr_check(struct sk_buff *skb) if (ip6h->nexthdr != NEXTHDR_TCP && ip6h->nexthdr != NEXTHDR_UDP) { struct ipv6_opt_hdr *ehdr = - (struct ipv6_opt_hdr *) 
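The Tx completion status encodings added in be_hw.h above map onto the per-queue error counters introduced in be.h and documented in be_ethtool.c. The completion handler that does the bookkeeping falls outside these hunks, so the decoder below is a hypothetical reconstruction of the Lancer side from those counter descriptions:

	static void lancer_update_tx_err(struct be_tx_stats *stats, u8 status)
	{
		switch (status) {
		case LANCER_TX_COMP_LSO_ERR:		/* 0x1 */
			stats->tx_tso_err++;
			break;
		case LANCER_TX_COMP_HSW_DROP_MAC_ERR:	/* 0x3 */
		case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:	/* 0x5 */
			stats->tx_spoof_check_err++;
			break;
		case LANCER_TX_COMP_QINQ_ERR:		/* 0x7 */
			stats->tx_qinq_err++;
			break;
		case LANCER_TX_COMP_PARITY_ERR:		/* 0xb */
			stats->tx_internal_parity_err++;
			break;
		case LANCER_TX_COMP_DMA_ERR:		/* 0xd */
			stats->tx_dma_err++;
			break;
		}
	}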
(skb->data + offset); + (struct ipv6_opt_hdr *)(skb->data + offset); /* offending pkt: 2nd byte following IPv6 hdr is 0xff */ if (ehdr->hdrlen == 0xff) @@ -974,8 +982,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, * skip HW tagging is not enabled by FW. */ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && - (adapter->pvid || adapter->qnq_vid) && - !qnq_async_evt_rcvd(adapter))) + (adapter->pvid || adapter->qnq_vid) && + !qnq_async_evt_rcvd(adapter))) goto tx_drop; /* Manual VLAN tag insertion to prevent: @@ -1073,15 +1081,15 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) static int be_change_mtu(struct net_device *netdev, int new_mtu) { struct be_adapter *adapter = netdev_priv(netdev); - if (new_mtu < BE_MIN_MTU || - new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) { - dev_info(&adapter->pdev->dev, - "MTU must be between %d and %d bytes\n", - BE_MIN_MTU, - (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); + struct device *dev = &adapter->pdev->dev; + + if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) { + dev_info(dev, "MTU must be between %d and %d bytes\n", + BE_MIN_MTU, BE_MAX_MTU); return -EINVAL; } - dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", + + dev_info(dev, "MTU changed from %d to %d bytes\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; return 0; @@ -1093,6 +1101,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu) */ static int be_vid_config(struct be_adapter *adapter) { + struct device *dev = &adapter->pdev->dev; u16 vids[BE_NUM_VLANS_SUPPORTED]; u16 num = 0, i = 0; int status = 0; @@ -1114,16 +1123,15 @@ static int be_vid_config(struct be_adapter *adapter) if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) goto set_vlan_promisc; - dev_err(&adapter->pdev->dev, - "Setting HW VLAN filtering failed.\n"); + dev_err(dev, "Setting HW VLAN filtering failed\n"); } else { if (adapter->flags & BE_FLAGS_VLAN_PROMISC) { /* hw VLAN filtering re-enabled. 
*/ status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, OFF); if (!status) { - dev_info(&adapter->pdev->dev, - "Disabling VLAN Promiscuous mode.\n"); + dev_info(dev, + "Disabling VLAN Promiscuous mode\n"); adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; } } @@ -1137,11 +1145,10 @@ set_vlan_promisc: status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON); if (!status) { - dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n"); + dev_info(dev, "Enable VLAN Promiscuous mode\n"); adapter->flags |= BE_FLAGS_VLAN_PROMISC; } else - dev_err(&adapter->pdev->dev, - "Failed to enable VLAN Promiscuous mode.\n"); + dev_err(dev, "Failed to enable VLAN Promiscuous mode\n"); return status; } @@ -1417,6 +1424,7 @@ err: max_tx_rate, vf); return be_cmd_status(status); } + static int be_set_vf_link_state(struct net_device *netdev, int vf, int link_state) { @@ -1482,7 +1490,6 @@ static void be_eqd_update(struct be_adapter *adapter) tx_pkts = txo->stats.tx_reqs; } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start)); - /* Skip, if wrapped around or first calculation */ now = jiffies; if (!aic->jiffies || time_before(now, aic->jiffies) || @@ -1683,7 +1690,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi, if (netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); - skb->encapsulation = rxcp->tunneled; + skb->csum_level = rxcp->tunneled; skb_mark_napi_id(skb, napi); if (rxcp->vlanf) @@ -1741,7 +1748,7 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, if (adapter->netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); - skb->encapsulation = rxcp->tunneled; + skb->csum_level = rxcp->tunneled; skb_mark_napi_id(skb, napi); if (rxcp->vlanf) @@ -1753,65 +1760,46 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl, struct be_rx_compl_info *rxcp) { - rxcp->pkt_size = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl); - rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl); - rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl); - rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl); - rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl); - rxcp->ip_csum = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl); - rxcp->l4_csum = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl); - rxcp->ipv6 = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl); - rxcp->num_rcvd = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl); - rxcp->pkt_type = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); - rxcp->rss_hash = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl); + rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl); + rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl); + rxcp->err = GET_RX_COMPL_V1_BITS(err, compl); + rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl); + rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl); + rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl); + rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl); + rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl); + rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl); + rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl); + rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl); if (rxcp->vlanf) { - rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq, - compl); - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, - 
vlan_tag, compl); + rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl); + rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl); } - rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); + rxcp->port = GET_RX_COMPL_V1_BITS(port, compl); rxcp->tunneled = - AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl); + GET_RX_COMPL_V1_BITS(tunneled, compl); } static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, struct be_rx_compl_info *rxcp) { - rxcp->pkt_size = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl); - rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl); - rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl); - rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl); - rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl); - rxcp->ip_csum = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl); - rxcp->l4_csum = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl); - rxcp->ipv6 = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl); - rxcp->num_rcvd = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl); - rxcp->pkt_type = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); - rxcp->rss_hash = - AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl); + rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl); + rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl); + rxcp->err = GET_RX_COMPL_V0_BITS(err, compl); + rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl); + rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl); + rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl); + rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl); + rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl); + rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl); + rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl); + rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl); if (rxcp->vlanf) { - rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq, - compl); - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, - vlan_tag, compl); + rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl); + rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl); } - rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); - rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, - ip_frag, compl); + rxcp->port = GET_RX_COMPL_V0_BITS(port, compl); + rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl); } static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) @@ -1872,7 +1860,7 @@ static inline struct page *be_alloc_pages(u32 size, gfp_t gfp) * Allocate a page, split it to fragments of size rx_frag_size and post as * receive buffers to BE */ -static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) +static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) { struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL; @@ -1881,10 +1869,10 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) struct device *dev = &adapter->pdev->dev; struct be_eth_rx_d *rxd; u64 page_dmaaddr = 0, frag_dmaaddr; - u32 posted, page_offset = 0; + u32 posted, page_offset = 0, notify = 0; page_info = &rxo->page_info_tbl[rxq->head]; - for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { + for (posted = 0; posted < frags_needed && !page_info->page; posted++) { if (!pagep) { pagep = be_alloc_pages(adapter->big_page_size, gfp); if (unlikely(!pagep)) { @@ -1897,7 +1885,7 @@ static void 
be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) if (dma_mapping_error(dev, page_dmaaddr)) { put_page(pagep); pagep = NULL; - rx_stats(rxo)->rx_post_fail++; + adapter->drv_stats.dma_map_errors++; break; } page_offset = 0; @@ -1940,7 +1928,11 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) atomic_add(posted, &rxq->used); if (rxo->rx_post_starved) rxo->rx_post_starved = false; - be_rxq_notify(adapter, rxq->id, posted); + do { + notify = min(256u, posted); + be_rxq_notify(adapter, rxq->id, notify); + posted -= notify; + } while (posted); } else if (atomic_read(&rxq->used) == 0) { /* Let be_worker replenish when memory is available */ rxo->rx_post_starved = true; @@ -1991,7 +1983,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter, queue_tail_inc(txq); } while (cur_index != last_index); - dev_kfree_skb_any(sent_skb); + dev_consume_skb_any(sent_skb); return num_wrbs; } @@ -2069,7 +2061,8 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) memset(page_info, 0, sizeof(*page_info)); } BUG_ON(atomic_read(&rxq->used)); - rxq->tail = rxq->head = 0; + rxq->tail = 0; + rxq->head = 0; } static void be_tx_compl_clean(struct be_adapter *adapter) @@ -2091,9 +2084,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter) num_wrbs = 0; txq = &txo->q; while ((txcp = be_tx_compl_get(&txo->cq))) { - end_idx = - AMAP_GET_BITS(struct amap_eth_tx_compl, - wrb_index, txcp); + end_idx = GET_TX_COMPL_BITS(wrb_index, txcp); num_wrbs += be_tx_compl_process(adapter, txo, end_idx); cmpl++; @@ -2164,7 +2155,6 @@ static int be_evt_queues_create(struct be_adapter *adapter) napi_hash_add(&eqo->napi); aic = &adapter->aic_obj[i]; eqo->adapter = adapter; - eqo->tx_budget = BE_TX_BUDGET; eqo->idx = i; aic->max_eqd = BE_MAX_EQD; aic->enable = true; @@ -2394,6 +2384,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, struct be_queue_info *rx_cq = &rxo->cq; struct be_rx_compl_info *rxcp; u32 work_done; + u32 frags_consumed = 0; for (work_done = 0; work_done < budget; work_done++) { rxcp = be_rx_compl_get(rxo); @@ -2426,6 +2417,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, be_rx_compl_process(rxo, napi, rxcp); loop_continue: + frags_consumed += rxcp->num_rcvd; be_rx_stats_update(rxo, rxcp); } @@ -2437,26 +2429,71 @@ loop_continue: */ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM && !rxo->rx_post_starved) - be_post_rx_frags(rxo, GFP_ATOMIC); + be_post_rx_frags(rxo, GFP_ATOMIC, + max_t(u32, MAX_RX_POST, + frags_consumed)); } return work_done; } -static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, - int budget, int idx) +static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) +{ + switch (status) { + case BE_TX_COMP_HDR_PARSE_ERR: + tx_stats(txo)->tx_hdr_parse_err++; + break; + case BE_TX_COMP_NDMA_ERR: + tx_stats(txo)->tx_dma_err++; + break; + case BE_TX_COMP_ACL_ERR: + tx_stats(txo)->tx_spoof_check_err++; + break; + } +} + +static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) +{ + switch (status) { + case LANCER_TX_COMP_LSO_ERR: + tx_stats(txo)->tx_tso_err++; + break; + case LANCER_TX_COMP_HSW_DROP_MAC_ERR: + case LANCER_TX_COMP_HSW_DROP_VLAN_ERR: + tx_stats(txo)->tx_spoof_check_err++; + break; + case LANCER_TX_COMP_QINQ_ERR: + tx_stats(txo)->tx_qinq_err++; + break; + case LANCER_TX_COMP_PARITY_ERR: + tx_stats(txo)->tx_internal_parity_err++; + break; + case LANCER_TX_COMP_DMA_ERR: + tx_stats(txo)->tx_dma_err++; + break; + } +} + +static void be_process_tx(struct be_adapter 
*adapter, struct be_tx_obj *txo, + int idx) { struct be_eth_tx_compl *txcp; - int num_wrbs = 0, work_done; + int num_wrbs = 0, work_done = 0; + u32 compl_status; + u16 last_idx; - for (work_done = 0; work_done < budget; work_done++) { - txcp = be_tx_compl_get(&txo->cq); - if (!txcp) - break; - num_wrbs += be_tx_compl_process(adapter, txo, - AMAP_GET_BITS(struct - amap_eth_tx_compl, - wrb_index, txcp)); + while ((txcp = be_tx_compl_get(&txo->cq))) { + last_idx = GET_TX_COMPL_BITS(wrb_index, txcp); + num_wrbs += be_tx_compl_process(adapter, txo, last_idx); + work_done++; + + compl_status = GET_TX_COMPL_BITS(status, txcp); + if (compl_status) { + if (lancer_chip(adapter)) + lancer_update_tx_err(txo, compl_status); + else + be_update_tx_err(txo, compl_status); + } } if (work_done) { @@ -2474,7 +2511,6 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, tx_stats(txo)->tx_compl += work_done; u64_stats_update_end(&tx_stats(txo)->sync_compl); } - return (work_done < budget); /* Done */ } int be_poll(struct napi_struct *napi, int budget) @@ -2483,17 +2519,12 @@ int be_poll(struct napi_struct *napi, int budget) struct be_adapter *adapter = eqo->adapter; int max_work = 0, work, i, num_evts; struct be_rx_obj *rxo; - bool tx_done; + struct be_tx_obj *txo; num_evts = events_get(eqo); - /* Process all TXQs serviced by this EQ */ - for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) { - tx_done = be_process_tx(adapter, &adapter->tx_obj[i], - eqo->tx_budget, i); - if (!tx_done) - max_work = budget; - } + for_all_tx_queues_on_eq(adapter, eqo, txo, i) + be_process_tx(adapter, txo, i); if (be_lock_napi(eqo)) { /* This loop will iterate twice for EQ0 in which @@ -2882,7 +2913,7 @@ static int be_rx_qs_create(struct be_adapter *adapter) /* First time posting */ for_all_rx_queues(adapter, rxo, i) - be_post_rx_frags(rxo, GFP_KERNEL); + be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); return 0; } @@ -3309,10 +3340,20 @@ static void BEx_get_resources(struct be_adapter *adapter, */ if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) || !be_physfn(adapter) || (be_is_mc(adapter) && - !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) + !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) { res->max_tx_qs = 1; - else + } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) { + struct be_resources super_nic_res = {0}; + + /* On a SuperNIC profile, the driver needs to use the + * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits + */ + be_cmd_get_profile_config(adapter, &super_nic_res, 0); + /* Some old versions of BE3 FW don't report max_tx_qs value */ + res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS; + } else { res->max_tx_qs = BE3_MAX_TX_QS; + } if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && !use_sriov && be_physfn(adapter)) @@ -3362,7 +3403,7 @@ static int be_get_sriov_config(struct be_adapter *adapter) if (!be_max_vfs(adapter)) { if (num_vfs) - dev_warn(dev, "device doesn't support SRIOV\n"); + dev_warn(dev, "SRIOV is disabled. 
Ignoring num_vfs\n"); adapter->num_vfs = 0; return 0; } @@ -3413,16 +3454,16 @@ static int be_get_resources(struct be_adapter *adapter) if (be_roce_supported(adapter)) res.max_evt_qs /= 2; adapter->res = res; - - dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", - be_max_txqs(adapter), be_max_rxqs(adapter), - be_max_rss(adapter), be_max_eqs(adapter), - be_max_vfs(adapter)); - dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", - be_max_uc(adapter), be_max_mc(adapter), - be_max_vlans(adapter)); } + dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", + be_max_txqs(adapter), be_max_rxqs(adapter), + be_max_rss(adapter), be_max_eqs(adapter), + be_max_vfs(adapter)); + dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", + be_max_uc(adapter), be_max_mc(adapter), + be_max_vlans(adapter)); + return 0; } @@ -3633,9 +3674,10 @@ static int be_setup(struct be_adapter *adapter) goto err; be_cmd_get_fw_ver(adapter); + dev_info(dev, "FW version is %s\n", adapter->fw_ver); if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) { - dev_err(dev, "Firmware on card is old(%s), IRQs may not work.", + dev_err(dev, "Firmware on card is old(%s), IRQs may not work", adapter->fw_ver); dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); } @@ -3683,8 +3725,6 @@ static void be_netpoll(struct net_device *netdev) be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0); napi_schedule(&eqo->napi); } - - return; } #endif @@ -4052,6 +4092,7 @@ static int lancer_fw_download(struct be_adapter *adapter, { #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) #define LANCER_FW_DOWNLOAD_LOCATION "/prg" + struct device *dev = &adapter->pdev->dev; struct be_dma_mem flash_cmd; const u8 *data_ptr = NULL; u8 *dest_image_ptr = NULL; @@ -4064,21 +4105,16 @@ static int lancer_fw_download(struct be_adapter *adapter, u8 change_status; if (!IS_ALIGNED(fw->size, sizeof(u32))) { - dev_err(&adapter->pdev->dev, - "FW Image not properly aligned. " - "Length must be 4 byte aligned.\n"); - status = -EINVAL; - goto lancer_fw_exit; + dev_err(dev, "FW image size should be multiple of 4\n"); + return -EINVAL; } flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + LANCER_FW_DOWNLOAD_CHUNK; - flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, + flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, GFP_KERNEL); - if (!flash_cmd.va) { - status = -ENOMEM; - goto lancer_fw_exit; - } + if (!flash_cmd.va) + return -ENOMEM; dest_image_ptr = flash_cmd.va + sizeof(struct lancer_cmd_req_write_object); @@ -4113,35 +4149,27 @@ static int lancer_fw_download(struct be_adapter *adapter, &add_status); } - dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, - flash_cmd.dma); + dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); if (status) { - dev_err(&adapter->pdev->dev, - "Firmware load error. 
" - "Status code: 0x%x Additional Status: 0x%x\n", - status, add_status); - goto lancer_fw_exit; + dev_err(dev, "Firmware load error\n"); + return be_cmd_status(status); } + dev_info(dev, "Firmware flashed successfully\n"); + if (change_status == LANCER_FW_RESET_NEEDED) { - dev_info(&adapter->pdev->dev, - "Resetting adapter to activate new FW\n"); + dev_info(dev, "Resetting adapter to activate new FW\n"); status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { - dev_err(&adapter->pdev->dev, - "Adapter busy for FW reset.\n" - "New FW will not be active.\n"); - goto lancer_fw_exit; + dev_err(dev, "Adapter busy, could not reset FW\n"); + dev_err(dev, "Reboot server to activate new FW\n"); } } else if (change_status != LANCER_NO_RESET_NEEDED) { - dev_err(&adapter->pdev->dev, - "System reboot required for new FW to be active\n"); + dev_info(dev, "Reboot server to activate new FW\n"); } - dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); -lancer_fw_exit: - return status; + return 0; } #define UFI_TYPE2 2 @@ -4374,7 +4402,6 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, return; err: be_disable_vxlan_offloads(adapter); - return; } static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, @@ -4506,6 +4533,7 @@ static int be_map_pci_bars(struct be_adapter *adapter) return 0; pci_map_err: + dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); be_unmap_pci_bars(adapter); return -ENOMEM; } @@ -4713,7 +4741,6 @@ static void be_func_recovery_task(struct work_struct *work) be_detect_error(adapter); if (adapter->hw_error && lancer_chip(adapter)) { - rtnl_lock(); netif_device_detach(adapter->netdev); rtnl_unlock(); @@ -4750,7 +4777,7 @@ static void be_worker(struct work_struct *work) if (!adapter->stats_cmd_sent) { if (lancer_chip(adapter)) lancer_cmd_get_pport_stats(adapter, - &adapter->stats_cmd); + &adapter->stats_cmd); else be_cmd_get_stats(adapter, &adapter->stats_cmd); } @@ -4764,7 +4791,7 @@ static void be_worker(struct work_struct *work) * allocation failures. 
*/ if (rxo->rx_post_starved) - be_post_rx_frags(rxo, GFP_KERNEL); + be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); } be_eqd_update(adapter); @@ -4822,6 +4849,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) struct net_device *netdev; char port_name; + dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER); + status = pci_enable_device(pdev); if (status) goto do_none; @@ -4853,11 +4882,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) } } - if (be_physfn(adapter)) { - status = pci_enable_pcie_error_reporting(pdev); - if (!status) - dev_info(&pdev->dev, "PCIe error reporting enabled\n"); - } + status = pci_enable_pcie_error_reporting(pdev); + if (!status) + dev_info(&pdev->dev, "PCIe error reporting enabled\n"); status = be_ctrl_init(adapter); if (status) @@ -4897,7 +4924,8 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) INIT_DELAYED_WORK(&adapter->work, be_worker); INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task); - adapter->rx_fc = adapter->tx_fc = true; + adapter->rx_fc = true; + adapter->tx_fc = true; status = be_setup(adapter); if (status) diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index ef4672dc7357..132866433a25 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -174,6 +174,7 @@ int be_roce_register_driver(struct ocrdma_driver *drv) ocrdma_drv = drv; list_for_each_entry(dev, &be_adapter_list, entry) { struct net_device *netdev; + _be_roce_dev_add(dev); netdev = dev->netdev; if (netif_running(netdev) && netif_oper_up(netdev)) diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index ee41d98b44b6..26fb1de98826 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -27,8 +27,8 @@ */ #define FEC_IEVENT 0x004 /* Interrupt event reg */ #define FEC_IMASK 0x008 /* Interrupt mask reg */ -#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */ -#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */ +#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */ +#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */ #define FEC_ECNTRL 0x024 /* Ethernet control reg */ #define FEC_MII_DATA 0x040 /* MII manage frame reg */ #define FEC_MII_SPEED 0x044 /* MII speed control reg */ @@ -38,6 +38,12 @@ #define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */ #define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */ #define FEC_OPD 0x0ec /* Opcode + Pause duration */ +#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */ +#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */ +#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */ +#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */ +#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */ +#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */ #define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */ #define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */ #define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */ @@ -45,14 +51,27 @@ #define FEC_X_WMRK 0x144 /* FIFO transmit water mark */ #define FEC_R_BOUND 0x14c /* FIFO receive bound reg */ #define FEC_R_FSTART 0x150 /* FIFO receive start reg */ -#define FEC_R_DES_START 0x180 /* Receive descriptor ring */ -#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */ +#define 
FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */ +#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */ +#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */ +#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */ +#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */ +#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */ #define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */ #define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */ #define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ #define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ #define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ #define FEC_RACC 0x1C4 /* Receive Accelerator function */ +#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */ +#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */ +#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */ +#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */ +#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */ +#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */ +#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */ +#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */ +#define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */ #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ @@ -121,8 +140,12 @@ #define FEC_IEVENT 0x004 /* Interrupt even reg */ #define FEC_IMASK 0x008 /* Interrupt mask reg */ #define FEC_IVEC 0x00c /* Interrupt vec status reg */ -#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */ -#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */ +#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */ +#define FEC_R_DES_ACTIVE_1 FEC_R_DES_ACTIVE_0 +#define FEC_R_DES_ACTIVE_2 FEC_R_DES_ACTIVE_0 +#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */ +#define FEC_X_DES_ACTIVE_1 FEC_X_DES_ACTIVE_0 +#define FEC_X_DES_ACTIVE_2 FEC_X_DES_ACTIVE_0 #define FEC_MII_DATA 0x040 /* MII manage frame reg */ #define FEC_MII_SPEED 0x044 /* MII speed control reg */ #define FEC_R_BOUND 0x08c /* FIFO receive bound reg */ @@ -136,11 +159,27 @@ #define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */ #define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */ #define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */ -#define FEC_R_DES_START 0x3d0 /* Receive descriptor ring */ -#define FEC_X_DES_START 0x3d4 /* Transmit descriptor ring */ +#define FEC_R_DES_START_0 0x3d0 /* Receive descriptor ring */ +#define FEC_R_DES_START_1 FEC_R_DES_START_0 +#define FEC_R_DES_START_2 FEC_R_DES_START_0 +#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */ +#define FEC_X_DES_START_1 FEC_X_DES_START_0 +#define FEC_X_DES_START_2 FEC_X_DES_START_0 #define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */ #define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */ - +/* Not existed in real chip + * Just for pass build. 
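 * Concretely: the dummy 0xFFF offsets below exist only so that FEC_RCMR(),
 * FEC_DMA_CFG() and the FEC_TXIC/FEC_RXIC users keep compiling on M5272;
 * they should never be written at runtime, because rings 1/2 are touched
 * only on parts with FEC_QUIRK_HAS_AVB, where num_tx_queues and
 * num_rx_queues can exceed 1.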
+ */ +#define FEC_RCMR_1 0xFFF +#define FEC_RCMR_2 0xFFF +#define FEC_DMA_CFG_1 0xFFF +#define FEC_DMA_CFG_2 0xFFF +#define FEC_TXIC0 0xFFF +#define FEC_TXIC1 0xFFF +#define FEC_TXIC2 0xFFF +#define FEC_RXIC0 0xFFF +#define FEC_RXIC1 0xFFF +#define FEC_RXIC2 0xFFF #endif /* CONFIG_M5272 */ @@ -233,6 +272,43 @@ struct bufdesc_ex { /* This device has up to three irqs on some platforms */ #define FEC_IRQ_NUM 3 +/* Maximum number of queues supported + * ENET with AVB IP can support up to 3 independent tx queues and rx queues. + * User can point the queue number that is less than or equal to 3. + */ +#define FEC_ENET_MAX_TX_QS 3 +#define FEC_ENET_MAX_RX_QS 3 + +#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \ + ((X == 2) ? \ + FEC_R_DES_START_2 : FEC_R_DES_START_0)) +#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \ + ((X == 2) ? \ + FEC_X_DES_START_2 : FEC_X_DES_START_0)) +#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \ + ((X == 2) ? \ + FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0)) +#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \ + ((X == 2) ? \ + FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0)) + +#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1) + +#define DMA_CLASS_EN (1 << 16) +#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1) +#define IDLE_SLOPE_MASK 0xFFFF +#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */ +#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */ +#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \ + (IDLE_SLOPE_2 & IDLE_SLOPE_MASK)) +#define RCMR_MATCHEN (0x1 << 16) +#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2)) +#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \ + RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3)) +#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \ + RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3)) +#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2) + /* The number of Tx and Rx buffers. These are allocated from the page * pool. The code may assume these are power of two, so it it best * to keep them that size. @@ -240,7 +316,7 @@ struct bufdesc_ex { * the skbuffer directly. */ -#define FEC_ENET_RX_PAGES 8 +#define FEC_ENET_RX_PAGES 256 #define FEC_ENET_RX_FRSIZE 2048 #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) @@ -256,6 +332,69 @@ struct bufdesc_ex { #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) +/* Interrupt events/masks. 
*/ +#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ +#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ +#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ +#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ +#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */ +#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */ +#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */ +#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ +#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */ +#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */ +#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */ +#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ +#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ +#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ +#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2) +#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2) +#define FEC_ENET_TS_AVAIL ((uint)0x00010000) +#define FEC_ENET_TS_TIMER ((uint)0x00008000) + +#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) + +/* ENET interrupt coalescing macro define */ +#define FEC_ITR_CLK_SEL (0x1 << 30) +#define FEC_ITR_EN (0x1 << 31) +#define FEC_ITR_ICFT(X) ((X & 0xFF) << 20) +#define FEC_ITR_ICTT(X) ((X) & 0xFFFF) +#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */ +#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */ + +#define FEC_VLAN_TAG_LEN 0x04 +#define FEC_ETHTYPE_LEN 0x02 + +struct fec_enet_priv_tx_q { + int index; + unsigned char *tx_bounce[TX_RING_SIZE]; + struct sk_buff *tx_skbuff[TX_RING_SIZE]; + + dma_addr_t bd_dma; + struct bufdesc *tx_bd_base; + uint tx_ring_size; + + unsigned short tx_stop_threshold; + unsigned short tx_wake_threshold; + + struct bufdesc *cur_tx; + struct bufdesc *dirty_tx; + char *tso_hdrs; + dma_addr_t tso_hdrs_dma; +}; + +struct fec_enet_priv_rx_q { + int index; + struct sk_buff *rx_skbuff[RX_RING_SIZE]; + + dma_addr_t bd_dma; + struct bufdesc *rx_bd_base; + uint rx_ring_size; + + struct bufdesc *cur_rx; +}; + /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and * tx_bd_base always point to the base of the buffer descriptors. The * cur_rx and cur_tx point to the currently available buffer. @@ -272,36 +411,28 @@ struct fec_enet_private { struct clk *clk_ipg; struct clk *clk_ahb; + struct clk *clk_ref; struct clk *clk_enet_out; struct clk *clk_ptp; bool ptp_clk_on; struct mutex ptp_clk_mutex; + unsigned int num_tx_queues; + unsigned int num_rx_queues; /* The saved address of a sent-in-place packet/buffer, for skfree(). 
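The FEC_ITR_* helpers above feed fec_enet_itr_coal_init(), whose body lands later in this patch. A minimal sketch of programming one Rx coalescing register with them, assuming the defaults above (not the verbatim driver code):

	u32 rx_itr = FEC_ITR_EN | FEC_ITR_CLK_SEL |
		     FEC_ITR_ICFT(FEC_ITR_ICFT_DEFAULT) |
		     FEC_ITR_ICTT(FEC_ITR_ICTT_DEFAULT);

	/* coalesce ring-0 Rx interrupts with a 200-frame / 1000us threshold */
	writel(rx_itr, fep->hwp + FEC_RXIC0);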
*/ - unsigned char *tx_bounce[TX_RING_SIZE]; - struct sk_buff *tx_skbuff[TX_RING_SIZE]; - struct sk_buff *rx_skbuff[RX_RING_SIZE]; + struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS]; + struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS]; - /* CPM dual port RAM relative addresses */ - dma_addr_t bd_dma; - /* Address of Rx and Tx buffers */ - struct bufdesc *rx_bd_base; - struct bufdesc *tx_bd_base; - /* The next free ring entry */ - struct bufdesc *cur_rx, *cur_tx; - /* The ring entries to be free()ed */ - struct bufdesc *dirty_tx; + unsigned int total_tx_ring_size; + unsigned int total_rx_ring_size; - unsigned short bufdesc_size; - unsigned short tx_ring_size; - unsigned short rx_ring_size; - unsigned short tx_stop_threshold; - unsigned short tx_wake_threshold; + unsigned long work_tx; + unsigned long work_rx; + unsigned long work_ts; + unsigned long work_mdio; - /* Software TSO */ - char *tso_hdrs; - dma_addr_t tso_hdrs_dma; + unsigned short bufdesc_size; struct platform_device *pdev; @@ -340,6 +471,16 @@ struct fec_enet_private { int hwts_tx_en; struct delayed_work time_keep; struct regulator *reg_phy; + + unsigned int tx_align; + unsigned int rx_align; + + /* hw interrupt coalesce */ + unsigned int rx_pkts_itr; + unsigned int rx_time_itr; + unsigned int tx_pkts_itr; + unsigned int tx_time_itr; + unsigned int itr_clk_rate; }; void fec_ptp_init(struct platform_device *pdev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 89355a719625..3a4ec0ff1a8c 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -63,15 +63,12 @@ #include "fec.h" static void set_multicast_list(struct net_device *ndev); - -#if defined(CONFIG_ARM) -#define FEC_ALIGNMENT 0xf -#else -#define FEC_ALIGNMENT 0x3 -#endif +static void fec_enet_itr_coal_init(struct net_device *ndev); #define DRIVER_NAME "fec" +#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0)) + /* Pause frame field and FIFO threshold */ #define FEC_ENET_FCE (1 << 5) #define FEC_ENET_RSEM_V 0x84 @@ -104,6 +101,22 @@ static void set_multicast_list(struct net_device *ndev); * ENET_TDAR[TDAR]. */ #define FEC_QUIRK_ERR006358 (1 << 7) +/* ENET IP hw AVB + * + * The i.MX6SX ENET IP adds Audio Video Bridging (AVB) feature support. + * - Two class indicators on receive with configurable priority + * - Two class indicators and line speed timer on transmit allowing + * implementation of class credit based shapers externally + * - Additional DMA registers provisioned to allow managing up to 3 + * independent rings + */ +#define FEC_QUIRK_HAS_AVB (1 << 8) +/* There is a TDAR race condition for multiqueue when the software sets TDAR + * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles). + * This will cause the udma_tx and udma_tx_arbiter state machines to hang. + * The issue exists in the i.MX6SX ENET IP.
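 * (The workaround appears in fec_enet_txq_submit_tso() further down in this
 * patch: TDAR is re-armed only after FEC_X_DES_ACTIVE reads back as zero.)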
+ */ +#define FEC_QUIRK_ERR007885 (1 << 9) static struct platform_device_id fec_devtype[] = { { @@ -128,6 +141,12 @@ static struct platform_device_id fec_devtype[] = { .name = "mvf600-fec", .driver_data = FEC_QUIRK_ENET_MAC, }, { + .name = "imx6sx-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | + FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885, + }, { /* sentinel */ } }; @@ -139,6 +158,7 @@ enum imx_fec_type { IMX28_FEC, IMX6Q_FEC, MVF600_FEC, + IMX6SX_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -147,6 +167,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, + { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -175,21 +196,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #endif #endif /* CONFIG_M5272 */ -/* Interrupt events/masks. */ -#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ -#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ -#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ -#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */ -#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */ -#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */ -#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */ -#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */ -#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */ -#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */ - -#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII) -#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) - /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. 
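With the imx6sx-fec entries added above, the quirk bits reach the driver through the usual OF match path; roughly (a sketch of the probe-time pattern this driver already uses, error handling elided):

	const struct of_device_id *of_id =
		of_match_device(fec_dt_ids, &pdev->dev);

	if (of_id)
		pdev->id_entry = of_id->data;
	/* call sites then test platform_get_device_id(fep->pdev)->driver_data
	 * against the FEC_QUIRK_* bits */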
*/ #define PKT_MAXBUF_SIZE 1522 @@ -242,22 +248,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); static int mii_cnt; static inline -struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, + struct fec_enet_private *fep, + int queue_id) { struct bufdesc *new_bd = bdp + 1; struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; struct bufdesc_ex *ex_base; struct bufdesc *base; int ring_size; - if (bdp >= fep->tx_bd_base) { - base = fep->tx_bd_base; - ring_size = fep->tx_ring_size; - ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + if (bdp >= txq->tx_bd_base) { + base = txq->tx_bd_base; + ring_size = txq->tx_ring_size; + ex_base = (struct bufdesc_ex *)txq->tx_bd_base; } else { - base = fep->rx_bd_base; - ring_size = fep->rx_ring_size; - ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + base = rxq->rx_bd_base; + ring_size = rxq->rx_ring_size; + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; } if (fep->bufdesc_ex) @@ -269,22 +279,26 @@ struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_priva } static inline -struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, + struct fec_enet_private *fep, + int queue_id) { struct bufdesc *new_bd = bdp - 1; struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; + struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; struct bufdesc_ex *ex_base; struct bufdesc *base; int ring_size; - if (bdp >= fep->tx_bd_base) { - base = fep->tx_bd_base; - ring_size = fep->tx_ring_size; - ex_base = (struct bufdesc_ex *)fep->tx_bd_base; + if (bdp >= txq->tx_bd_base) { + base = txq->tx_bd_base; + ring_size = txq->tx_ring_size; + ex_base = (struct bufdesc_ex *)txq->tx_bd_base; } else { - base = fep->rx_bd_base; - ring_size = fep->rx_ring_size; - ex_base = (struct bufdesc_ex *)fep->rx_bd_base; + base = rxq->rx_bd_base; + ring_size = rxq->rx_ring_size; + ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; } if (fep->bufdesc_ex) @@ -300,14 +314,15 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; } -static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep) +static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, + struct fec_enet_priv_tx_q *txq) { int entries; - entries = ((const char *)fep->dirty_tx - - (const char *)fep->cur_tx) / fep->bufdesc_size - 1; + entries = ((const char *)txq->dirty_tx - + (const char *)txq->cur_tx) / fep->bufdesc_size - 1; - return entries > 0 ? entries : entries + fep->tx_ring_size; + return entries > 0 ? entries : entries + txq->tx_ring_size; } static void *swap_buffer(void *bufaddr, int len) @@ -324,22 +339,26 @@ static void *swap_buffer(void *bufaddr, int len) static void fec_dump(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct bufdesc *bdp = fep->tx_bd_base; - unsigned int index = 0; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + int index = 0; netdev_info(ndev, "TX ring dump\n"); pr_info("Nr SC addr len SKB\n"); + txq = fep->tx_queue[0]; + bdp = txq->tx_bd_base; + do { pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", index, - bdp == fep->cur_tx ? 
'S' : ' ', - bdp == fep->dirty_tx ? 'H' : ' ', + bdp == txq->cur_tx ? 'S' : ' ', + bdp == txq->dirty_tx ? 'H' : ' ', bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, - fep->tx_skbuff[index]); - bdp = fec_enet_get_nextdesc(bdp, fep); + txq->tx_skbuff[index]); + bdp = fec_enet_get_nextdesc(bdp, fep, 0); index++; - } while (bdp != fep->tx_bd_base); + } while (bdp != txq->tx_bd_base); } static inline bool is_ipv4_pkt(struct sk_buff *skb) @@ -365,14 +384,17 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) } static int -fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) +fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, + struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - struct bufdesc *bdp = fep->cur_tx; + struct bufdesc *bdp = txq->cur_tx; struct bufdesc_ex *ebdp; int nr_frags = skb_shinfo(skb)->nr_frags; + unsigned short queue = skb_get_queue_mapping(skb); int frag, frag_len; unsigned short status; unsigned int estatus = 0; @@ -384,7 +406,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) for (frag = 0; frag < nr_frags; frag++) { this_frag = &skb_shinfo(skb)->frags[frag]; - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); ebdp = (struct bufdesc_ex *)bdp; status = bdp->cbd_sc; @@ -412,11 +434,11 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], bufaddr, frag_len); - bufaddr = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], bufaddr, frag_len); + bufaddr = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, frag_len); @@ -436,21 +458,22 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev) bdp->cbd_sc = status; } - fep->cur_tx = bdp; + txq->cur_tx = bdp; return 0; dma_mapping_error: - bdp = fep->cur_tx; + bdp = txq->cur_tx; for (i = 0; i < frag; i++) { - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_TO_DEVICE); } return NETDEV_TX_OK; } -static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) +static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = @@ -461,12 +484,13 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) dma_addr_t addr; unsigned short status; unsigned short buflen; + unsigned short queue; unsigned int estatus = 0; unsigned int index; int entries_free; int ret; - entries_free = fec_enet_get_free_txdesc_num(fep); + entries_free = fec_enet_get_free_txdesc_num(fep, txq); if (entries_free < MAX_SKB_FRAGS + 1) { dev_kfree_skb_any(skb); if (net_ratelimit()) @@ -481,7 +505,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) } /* Fill in a Tx ring entry */ - bdp = fep->cur_tx; + bdp = txq->cur_tx; status = bdp->cbd_sc; status &= 
~BD_ENET_TX_STATS; @@ -489,11 +513,12 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) bufaddr = skb->data; buflen = skb_headlen(skb); - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + queue = skb_get_queue_mapping(skb); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + if (((unsigned long) bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], skb->data, buflen); - bufaddr = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], skb->data, buflen); + bufaddr = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, buflen); @@ -509,7 +534,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) } if (nr_frags) { - ret = fec_enet_txq_submit_frag_skb(skb, ndev); + ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev); if (ret) return ret; } else { @@ -537,10 +562,10 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) ebdp->cbd_esc = estatus; } - last_bdp = fep->cur_tx; - index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep); + last_bdp = txq->cur_tx; + index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); /* Save skb pointer */ - fep->tx_skbuff[index] = skb; + txq->tx_skbuff[index] = skb; bdp->cbd_datlen = buflen; bdp->cbd_bufaddr = addr; @@ -552,27 +577,28 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev) bdp->cbd_sc = status; /* If this was the last BD in the ring, start at the beginning again. */ - bdp = fec_enet_get_nextdesc(last_bdp, fep); + bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); skb_tx_timestamp(skb); - fep->cur_tx = bdp; + txq->cur_tx = bdp; /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); return 0; } static int -fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, - struct bufdesc *bdp, int index, char *data, - int size, bool last_tcp, bool is_last) +fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, + struct net_device *ndev, + struct bufdesc *bdp, int index, char *data, + int size, bool last_tcp, bool is_last) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); unsigned short status; unsigned int estatus = 0; dma_addr_t addr; @@ -582,10 +608,10 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - if (((unsigned long) data) & FEC_ALIGNMENT || + if (((unsigned long) data) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], data, size); - data = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], data, size); + data = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, size); @@ -624,14 +650,15 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev, } static int -fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, - struct bufdesc *bdp, int index) +fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, struct net_device *ndev, + struct bufdesc *bdp, int index) { struct fec_enet_private *fep = netdev_priv(ndev); const struct 
platform_device_id *id_entry = platform_get_device_id(fep->pdev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; + struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); void *bufaddr; unsigned long dmabuf; unsigned short status; @@ -641,12 +668,12 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); - bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE; - dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE; - if (((unsigned long) bufaddr) & FEC_ALIGNMENT || + bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; + dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; + if (((unsigned long)bufaddr) & fep->tx_align || id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) { - memcpy(fep->tx_bounce[index], skb->data, hdr_len); - bufaddr = fep->tx_bounce[index]; + memcpy(txq->tx_bounce[index], skb->data, hdr_len); + bufaddr = txq->tx_bounce[index]; if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(bufaddr, hdr_len); @@ -676,17 +703,22 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev, return 0; } -static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) +static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, + struct sk_buff *skb, + struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int total_len, data_left; - struct bufdesc *bdp = fep->cur_tx; + struct bufdesc *bdp = txq->cur_tx; + unsigned short queue = skb_get_queue_mapping(skb); struct tso_t tso; unsigned int index = 0; int ret; + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); - if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) { + if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "NOT enough BD for TSO!\n"); @@ -706,14 +738,14 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) while (total_len > 0) { char *hdr; - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); total_len -= data_left; /* prepare packet headers: MAC + IP + TCP */ - hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE; + hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); - ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index); + ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); if (ret) goto err_release; @@ -721,10 +753,13 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) int size; size = min_t(int, tso.size, data_left); - bdp = fec_enet_get_nextdesc(bdp, fep); - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); - ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data, - size, size == data_left, + bdp = fec_enet_get_nextdesc(bdp, fep, queue); + index = fec_enet_get_bd_index(txq->tx_bd_base, + bdp, fep); + ret = fec_enet_txq_put_data_tso(txq, skb, ndev, + bdp, index, + tso.data, size, + size == data_left, total_len == 0); if (ret) goto err_release; @@ -733,17 +768,22 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev) tso_build_data(skb, &tso, size); } - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, 
queue); } /* Save skb pointer */ - fep->tx_skbuff[index] = skb; + txq->tx_skbuff[index] = skb; skb_tx_timestamp(skb); - fep->cur_tx = bdp; + txq->cur_tx = bdp; /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || + !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); return 0; @@ -757,18 +797,25 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); int entries_free; + unsigned short queue; + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; int ret; + queue = skb_get_queue_mapping(skb); + txq = fep->tx_queue[queue]; + nq = netdev_get_tx_queue(ndev, queue); + if (skb_is_gso(skb)) - ret = fec_enet_txq_submit_tso(skb, ndev); + ret = fec_enet_txq_submit_tso(txq, skb, ndev); else - ret = fec_enet_txq_submit_skb(skb, ndev); + ret = fec_enet_txq_submit_skb(txq, skb, ndev); if (ret) return ret; - entries_free = fec_enet_get_free_txdesc_num(fep); - if (entries_free <= fep->tx_stop_threshold) - netif_stop_queue(ndev); + entries_free = fec_enet_get_free_txdesc_num(fep, txq); + if (entries_free <= txq->tx_stop_threshold) + netif_tx_stop_queue(nq); return NETDEV_TX_OK; } @@ -778,46 +825,111 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) static void fec_enet_bd_init(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned int i; + unsigned int q; - /* Initialize the receive buffer descriptors. */ - bdp = fep->rx_bd_base; - for (i = 0; i < fep->rx_ring_size; i++) { + for (q = 0; q < fep->num_rx_queues; q++) { + /* Initialize the receive buffer descriptors. */ + rxq = fep->rx_queue[q]; + bdp = rxq->rx_bd_base; - /* Initialize the BD for every fragment in the page. */ - if (bdp->cbd_bufaddr) - bdp->cbd_sc = BD_ENET_RX_EMPTY; - else + for (i = 0; i < rxq->rx_ring_size; i++) { + + /* Initialize the BD for every fragment in the page. */ + if (bdp->cbd_bufaddr) + bdp->cbd_sc = BD_ENET_RX_EMPTY; + else + bdp->cbd_sc = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + + rxq->cur_rx = rxq->rx_bd_base; + } + + for (q = 0; q < fep->num_tx_queues; q++) { + /* ...and the same for transmit */ + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + txq->cur_tx = bdp; + + for (i = 0; i < txq->tx_ring_size; i++) { + /* Initialize the BD for every fragment in the page. 
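The chain of readl() calls above is the ERR007885 workaround promised by the quirk comment in fec.h: on affected silicon TDAR may only be re-armed while it reads back as zero, and re-reading a few times covers the 2-4 cycle race window. Restated as a stand-alone helper (a sketch with a hypothetical name, not code from this patch):

	static void fec_enet_kick_tx(struct fec_enet_private *fep, u16 queue,
				     unsigned long quirks)
	{
		int i = 0;

		if (quirks & FEC_QUIRK_ERR007885) {
			/* stop early once TDAR reads back as 0 */
			for (i = 0; i < 4; i++)
				if (!readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
					break;
			if (i == 4)
				return;	/* TDAR still set: DMA is already running */
		}

		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
	}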
*/ bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep); + if (txq->tx_skbuff[i]) { + dev_kfree_skb_any(txq->tx_skbuff[i]); + txq->tx_skbuff[i] = NULL; + } + bdp->cbd_bufaddr = 0; + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + + /* Set the last buffer to wrap */ + bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp->cbd_sc |= BD_SC_WRAP; + txq->dirty_tx = bdp; } +} - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; +static void fec_enet_active_rxring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; - fep->cur_rx = fep->rx_bd_base; + for (i = 0; i < fep->num_rx_queues; i++) + writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); +} - /* ...and the same for transmit */ - bdp = fep->tx_bd_base; - fep->cur_tx = bdp; - for (i = 0; i < fep->tx_ring_size; i++) { +static void fec_enet_enable_ring(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + int i; - /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - if (fep->tx_skbuff[i]) { - dev_kfree_skb_any(fep->tx_skbuff[i]); - fep->tx_skbuff[i] = NULL; - } - bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep); + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(RCMR_MATCHEN | RCMR_CMP(i), + fep->hwp + FEC_RCMR(i)); } - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; - fep->dirty_tx = bdp; + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); + + /* enable DMA1/2 */ + if (i) + writel(DMA_CLASS_EN | IDLE_SLOPE(i), + fep->hwp + FEC_DMA_CFG(i)); + } +} + +static void fec_enet_reset_skb(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct fec_enet_priv_tx_q *txq; + int i, j; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + + for (j = 0; j < txq->tx_ring_size; j++) { + if (txq->tx_skbuff[j]) { + dev_kfree_skb_any(txq->tx_skbuff[j]); + txq->tx_skbuff[j] = NULL; + } + } + } } /* @@ -831,15 +943,21 @@ fec_restart(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); - int i; u32 val; u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); + /* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. + */ + if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } /* * enet-mac reset will reset mac address registers too, @@ -859,22 +977,10 @@ fec_restart(struct net_device *ndev) fec_enet_bd_init(ndev); - /* Set receive and transmit descriptor base. 
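fec_enet_enable_ring() above programs each extra Rx ring's classifier with RCMR_MATCHEN | RCMR_CMP(i). Working the fec.h macros through by hand (RCMR_CMP_CFG(v, n) places VLAN priority v into 4-bit slot n):

	RCMR_CMP_1 = 0x0 | 0x10 | 0x200 | 0x3000 = 0x3210	/* priorities 0-3 */
	RCMR_CMP_2 = 0x4 | 0x50 | 0x600 | 0x7000 = 0x7654	/* priorities 4-7 */

so VLAN priorities 0-3 steer frames to ring 1 and priorities 4-7 to ring 2, while unmatched traffic falls back to the default ring 0.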
*/ - writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); - if (fep->bufdesc_ex) - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); - else - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); - + fec_enet_enable_ring(ndev); - for (i = 0; i <= TX_RING_MOD_MASK; i++) { - if (fep->tx_skbuff[i]) { - dev_kfree_skb_any(fep->tx_skbuff[i]); - fep->tx_skbuff[i] = NULL; - } - } + /* Reset tx SKB buffers. */ + fec_enet_reset_skb(ndev); /* Enable MII mode */ if (fep->full_duplex == DUPLEX_FULL) { @@ -996,13 +1102,17 @@ fec_restart(struct net_device *ndev) /* And last, enable the transmit and receive processing */ writel(ecntl, fep->hwp + FEC_ECNTRL); - writel(0, fep->hwp + FEC_R_DES_ACTIVE); + fec_enet_active_rxring(ndev); if (fep->bufdesc_ex) fec_ptp_start_cyclecounter(ndev); /* Enable interrupts we wish to service */ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + + /* Init the interrupt coalescing */ + fec_enet_itr_coal_init(ndev); + } static void @@ -1021,9 +1131,16 @@ fec_stop(struct net_device *ndev) netdev_err(ndev, "Graceful transmit stop did not complete!\n"); } - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); + /* Whack a reset. We should wait for this. + * For i.MX6SX SOC, enet use AXI bus, we use disable MAC + * instead of reset MAC itself. + */ + if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); @@ -1081,37 +1198,45 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, } static void -fec_enet_tx(struct net_device *ndev) +fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) { struct fec_enet_private *fep; struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; + struct fec_enet_priv_tx_q *txq; + struct netdev_queue *nq; int index = 0; int entries_free; fep = netdev_priv(ndev); - bdp = fep->dirty_tx; + queue_id = FEC_ENET_GET_QUQUE(queue_id); + + txq = fep->tx_queue[queue_id]; /* get next bdp of dirty_tx */ - bdp = fec_enet_get_nextdesc(bdp, fep); + nq = netdev_get_tx_queue(ndev, queue_id); + bdp = txq->dirty_tx; + + /* get next bdp of dirty_tx */ + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { /* current queue is empty */ - if (bdp == fep->cur_tx) + if (bdp == txq->cur_tx) break; - index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); - skb = fep->tx_skbuff[index]; - fep->tx_skbuff[index] = NULL; - if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr)) + skb = txq->tx_skbuff[index]; + txq->tx_skbuff[index] = NULL; + if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, bdp->cbd_datlen, DMA_TO_DEVICE); bdp->cbd_bufaddr = 0; if (!skb) { - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); continue; } @@ -1153,23 +1278,37 @@ fec_enet_tx(struct net_device *ndev) /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); - fep->dirty_tx = bdp; + txq->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); /* Since we have freed up a buffer, the ring is no longer 
full */ if (netif_queue_stopped(ndev)) { - entries_free = fec_enet_get_free_txdesc_num(fep); - if (entries_free >= fep->tx_wake_threshold) - netif_wake_queue(ndev); + entries_free = fec_enet_get_free_txdesc_num(fep, txq); + if (entries_free >= txq->tx_wake_threshold) + netif_tx_wake_queue(nq); } } /* ERR006538: Keep the transmitter going */ - if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0) - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + if (bdp != txq->cur_tx && + readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) + writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); +} + +static void +fec_enet_tx(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + u16 queue_id; + /* First process class A queue, then Class B and Best Effort queue */ + for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { + clear_bit(queue_id, &fep->work_tx); + fec_enet_tx_queue(ndev, queue_id); + } + return; } /* During a receive, the cur_rx points to the current incoming buffer. @@ -1178,11 +1317,12 @@ fec_enet_tx(struct net_device *ndev) * effectively tossing the packet. */ static int -fec_enet_rx(struct net_device *ndev, int budget) +fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) { struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); + struct fec_enet_priv_rx_q *rxq; struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; @@ -1197,11 +1337,13 @@ fec_enet_rx(struct net_device *ndev, int budget) #ifdef CONFIG_M532x flush_cache_all(); #endif + queue_id = FEC_ENET_GET_QUQUE(queue_id); + rxq = fep->rx_queue[queue_id]; /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. */ - bdp = fep->cur_rx; + bdp = rxq->cur_rx; while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { @@ -1215,7 +1357,6 @@ fec_enet_rx(struct net_device *ndev, int budget) if ((status & BD_ENET_RX_LAST) == 0) netdev_err(ndev, "rcv is not +last\n"); - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); /* Check for errors. */ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | @@ -1248,8 +1389,8 @@ fec_enet_rx(struct net_device *ndev, int budget) pkt_len = bdp->cbd_datlen; ndev->stats.rx_bytes += pkt_len; - index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep); - data = fep->rx_skbuff[index]->data; + index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); + data = rxq->rx_skbuff[index]->data; dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); @@ -1264,7 +1405,7 @@ fec_enet_rx(struct net_device *ndev, int budget) /* If this is a VLAN packet remove the VLAN Tag */ vlan_packet_rcvd = false; if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && - fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { /* Push and remove the vlan tag */ struct vlan_hdr *vlan_header = (struct vlan_hdr *) (data + ETH_HLEN); @@ -1341,19 +1482,56 @@ rx_processing_done: } /* Update BD pointer to next entry */ - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. 
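
Editor's note: fec_enet_tx() above drains one TX queue per set bit in fep->work_tx; the bits are set by the interrupt handler via fec_enet_collect_events(), later in this hunk. A self-contained sketch of that dispatch pattern, using plain-C stand-ins for the kernel's set_bit()/clear_bit()/for_each_set_bit() (all demo_* names are illustrative):

#include <strings.h>	/* ffs() */

static unsigned int demo_work_tx;	/* bit n set => TX queue n has work */

static void demo_service_tx(void (*service_queue)(unsigned int q))
{
	int bit;

	while ((bit = ffs((int)demo_work_tx)) != 0) {
		unsigned int q = (unsigned int)(bit - 1);

		demo_work_tx &= ~(1u << q);	/* clear_bit() equivalent */
		service_queue(q);		/* fec_enet_tx_queue() analogue */
	}
}
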
*/ - writel(0, fep->hwp + FEC_R_DES_ACTIVE); + writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); } - fep->cur_rx = bdp; + rxq->cur_rx = bdp; + return pkt_received; +} + +static int +fec_enet_rx(struct net_device *ndev, int budget) +{ + int pkt_received = 0; + u16 queue_id; + struct fec_enet_private *fep = netdev_priv(ndev); + for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { + clear_bit(queue_id, &fep->work_rx); + pkt_received += fec_enet_rx_queue(ndev, + budget - pkt_received, queue_id); + } return pkt_received; } +static bool +fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) +{ + if (int_events == 0) + return false; + + if (int_events & FEC_ENET_RXF) + fep->work_rx |= (1 << 2); + if (int_events & FEC_ENET_RXF_1) + fep->work_rx |= (1 << 0); + if (int_events & FEC_ENET_RXF_2) + fep->work_rx |= (1 << 1); + + if (int_events & FEC_ENET_TXF) + fep->work_tx |= (1 << 2); + if (int_events & FEC_ENET_TXF_1) + fep->work_tx |= (1 << 0); + if (int_events & FEC_ENET_TXF_2) + fep->work_tx |= (1 << 1); + + return true; +} + static irqreturn_t fec_enet_interrupt(int irq, void *dev_id) { @@ -1365,6 +1543,7 @@ fec_enet_interrupt(int irq, void *dev_id) int_events = readl(fep->hwp + FEC_IEVENT); writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT); + fec_enet_collect_events(fep, int_events); if (int_events & napi_mask) { ret = IRQ_HANDLED; @@ -1621,6 +1800,11 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) } mutex_unlock(&fep->ptp_clk_mutex); } + if (fep->clk_ref) { + ret = clk_prepare_enable(fep->clk_ref); + if (ret) + goto failed_clk_ref; + } } else { clk_disable_unprepare(fep->clk_ahb); clk_disable_unprepare(fep->clk_ipg); @@ -1632,9 +1816,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) fep->ptp_clk_on = false; mutex_unlock(&fep->ptp_clk_mutex); } + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); } return 0; + +failed_clk_ref: + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); @@ -1674,13 +1864,13 @@ static int fec_enet_mii_probe(struct net_device *ndev) continue; if (dev_id--) continue; - strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); break; } if (phy_id >= PHY_MAX_ADDR) { netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); phy_id = 0; } @@ -2062,12 +2252,141 @@ static int fec_enet_nway_reset(struct net_device *dev) return genphy_restart_aneg(phydev); } +/* ITR clock source is enet system clock (clk_ahb). 
+ * TCTT unit is cycle_ns * 64 cycles
+ * So, the ICTT value = X us / (cycle_ns * 64)
+ */
+static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	return us * (fep->itr_clk_rate / 64000) / 1000;
+}
+
+/* Set threshold for interrupt coalescing */
+static void fec_enet_itr_coal_set(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+		platform_get_device_id(fep->pdev);
+	int rx_itr, tx_itr;
+
+	if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+		return;
+
+	/* Must be greater than zero to avoid unpredictable behavior */
+	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
+	    !fep->tx_time_itr || !fep->tx_pkts_itr)
+		return;
+
+	/* Select enet system clock as Interrupt Coalescing
+	 * timer Clock Source
+	 */
+	rx_itr = FEC_ITR_CLK_SEL;
+	tx_itr = FEC_ITR_CLK_SEL;
+
+	/* set ICFT and ICTT */
+	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
+	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+
+	rx_itr |= FEC_ITR_EN;
+	tx_itr |= FEC_ITR_EN;
+
+	writel(tx_itr, fep->hwp + FEC_TXIC0);
+	writel(rx_itr, fep->hwp + FEC_RXIC0);
+	writel(tx_itr, fep->hwp + FEC_TXIC1);
+	writel(rx_itr, fep->hwp + FEC_RXIC1);
+	writel(tx_itr, fep->hwp + FEC_TXIC2);
+	writel(rx_itr, fep->hwp + FEC_RXIC2);
+}
+
+static int
+fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+		platform_get_device_id(fep->pdev);
+
+	if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+		return -EOPNOTSUPP;
+
+	ec->rx_coalesce_usecs = fep->rx_time_itr;
+	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
+
+	ec->tx_coalesce_usecs = fep->tx_time_itr;
+	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
+
+	return 0;
+}
+
+static int
+fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+		platform_get_device_id(fep->pdev);
+
+	unsigned int cycle;
+
+	if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+		return -EOPNOTSUPP;
+
+	if (ec->rx_max_coalesced_frames > 255) {
+		pr_err("Rx coalesced frames exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	if (ec->tx_max_coalesced_frames > 255) {
+		pr_err("Tx coalesced frames exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	/* Validate the requested (ec) values, not the stale fep-> ones */
+	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
+	if (cycle > 0xFFFF) {
+		pr_err("Rx coalesced usecs exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
+	if (cycle > 0xFFFF) {
+		pr_err("Tx coalesced usecs exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	fep->rx_time_itr = ec->rx_coalesce_usecs;
+	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
+
+	fep->tx_time_itr = ec->tx_coalesce_usecs;
+	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
+
+	fec_enet_itr_coal_set(ndev);
+
+	return 0;
+}
+
+static void fec_enet_itr_coal_init(struct net_device *ndev)
+{
+	struct ethtool_coalesce ec;
+
+	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+	fec_enet_set_coalesce(ndev, &ec);
+}
+
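
Editor's note: a worked example of the microsecond-to-tick conversion used by fec_enet_us_to_itr_clock() above. ICTT is a 16-bit hardware field counting units of 64 AHB clock cycles, which is why fec_enet_set_coalesce() rejects results above 0xFFFF. The 66 MHz clock rate and the demo_* names below are illustrative assumptions, not values from the patch; only the arithmetic mirrors the driver.

#include <stdio.h>

/* same arithmetic as the driver helper: ticks = us / (cycle_ns * 64) */
static int demo_us_to_itr_clock(unsigned long itr_clk_rate, int us)
{
	return us * (int)(itr_clk_rate / 64000) / 1000;
}

int main(void)
{
	/* assumed 66 MHz AHB clock: 66000000 / 64000 = 1031 ticks per ms */
	printf("%d\n", demo_us_to_itr_clock(66000000UL, 1000)); /* 1031 */
	return 0;
}

 static const struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings =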
fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, .nway_reset = fec_enet_nway_reset, .get_link = ethtool_op_get_link, + .get_coalesce = fec_enet_get_coalesce, + .set_coalesce = fec_enet_set_coalesce, #ifndef CONFIG_M5272 .get_pauseparam = fec_enet_get_pauseparam, .set_pauseparam = fec_enet_set_pauseparam, @@ -2105,46 +2424,140 @@ static void fec_enet_free_buffers(struct net_device *ndev) unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; + unsigned int q; + + for (q = 0; q < fep->num_rx_queues; q++) { + rxq = fep->rx_queue[q]; + bdp = rxq->rx_bd_base; + for (i = 0; i < rxq->rx_ring_size; i++) { + skb = rxq->rx_skbuff[i]; + rxq->rx_skbuff[i] = NULL; + if (skb) { + dma_unmap_single(&fep->pdev->dev, + bdp->cbd_bufaddr, + FEC_ENET_RX_FRSIZE, + DMA_FROM_DEVICE); + dev_kfree_skb(skb); + } + bdp = fec_enet_get_nextdesc(bdp, fep, q); + } + } - bdp = fep->rx_bd_base; - for (i = 0; i < fep->rx_ring_size; i++) { - skb = fep->rx_skbuff[i]; - fep->rx_skbuff[i] = NULL; - if (skb) { - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + for (q = 0; q < fep->num_tx_queues; q++) { + txq = fep->tx_queue[q]; + bdp = txq->tx_bd_base; + for (i = 0; i < txq->tx_ring_size; i++) { + kfree(txq->tx_bounce[i]); + txq->tx_bounce[i] = NULL; + skb = txq->tx_skbuff[i]; + txq->tx_skbuff[i] = NULL; dev_kfree_skb(skb); } - bdp = fec_enet_get_nextdesc(bdp, fep); } +} + +static void fec_enet_free_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { + txq = fep->tx_queue[i]; + dma_free_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + txq->tso_hdrs, + txq->tso_hdrs_dma); + } + + for (i = 0; i < fep->num_rx_queues; i++) + if (fep->rx_queue[i]) + kfree(fep->rx_queue[i]); + + for (i = 0; i < fep->num_tx_queues; i++) + if (fep->tx_queue[i]) + kfree(fep->tx_queue[i]); +} + +static int fec_enet_alloc_queue(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + int i; + int ret = 0; + struct fec_enet_priv_tx_q *txq; + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) { + ret = -ENOMEM; + goto alloc_failed; + } - bdp = fep->tx_bd_base; - for (i = 0; i < fep->tx_ring_size; i++) { - kfree(fep->tx_bounce[i]); - fep->tx_bounce[i] = NULL; - skb = fep->tx_skbuff[i]; - fep->tx_skbuff[i] = NULL; - dev_kfree_skb(skb); + fep->tx_queue[i] = txq; + txq->tx_ring_size = TX_RING_SIZE; + fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; + + txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; + txq->tx_wake_threshold = + (txq->tx_ring_size - txq->tx_stop_threshold) / 2; + + txq->tso_hdrs = dma_alloc_coherent(NULL, + txq->tx_ring_size * TSO_HEADER_SIZE, + &txq->tso_hdrs_dma, + GFP_KERNEL); + if (!txq->tso_hdrs) { + ret = -ENOMEM; + goto alloc_failed; + } + } + + for (i = 0; i < fep->num_rx_queues; i++) { + fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), + GFP_KERNEL); + if (!fep->rx_queue[i]) { + ret = -ENOMEM; + goto alloc_failed; + } + + fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; + fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; } + return ret; + +alloc_failed: + fec_enet_free_queue(ndev); + return ret; } -static int fec_enet_alloc_buffers(struct net_device *ndev) +static int +fec_enet_alloc_rxq_buffers(struct 
net_device *ndev, unsigned int queue) { struct fec_enet_private *fep = netdev_priv(ndev); unsigned int i; struct sk_buff *skb; struct bufdesc *bdp; + struct fec_enet_priv_rx_q *rxq; + unsigned int off; - bdp = fep->rx_bd_base; - for (i = 0; i < fep->rx_ring_size; i++) { + rxq = fep->rx_queue[queue]; + bdp = rxq->rx_bd_base; + for (i = 0; i < rxq->rx_ring_size; i++) { dma_addr_t addr; skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (!skb) goto err_alloc; + off = ((unsigned long)skb->data) & fep->rx_align; + if (off) + skb_reserve(skb, fep->rx_align + 1 - off); + addr = dma_map_single(&fep->pdev->dev, skb->data, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, addr)) { dev_kfree_skb(skb); if (net_ratelimit()) @@ -2152,7 +2565,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) goto err_alloc; } - fep->rx_skbuff[i] = skb; + rxq->rx_skbuff[i] = skb; bdp->cbd_bufaddr = addr; bdp->cbd_sc = BD_ENET_RX_EMPTY; @@ -2161,17 +2574,32 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_RX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep); + bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp->cbd_sc |= BD_SC_WRAP; + return 0; - bdp = fep->tx_bd_base; - for (i = 0; i < fep->tx_ring_size; i++) { - fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); - if (!fep->tx_bounce[i]) + err_alloc: + fec_enet_free_buffers(ndev); + return -ENOMEM; +} + +static int +fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + struct bufdesc *bdp; + struct fec_enet_priv_tx_q *txq; + + txq = fep->tx_queue[queue]; + bdp = txq->tx_bd_base; + for (i = 0; i < txq->tx_ring_size; i++) { + txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); + if (!txq->tx_bounce[i]) goto err_alloc; bdp->cbd_sc = 0; @@ -2182,11 +2610,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) ebdp->cbd_esc = BD_ENET_TX_INT; } - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, fep, queue); } /* Set the last buffer to wrap. 
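
Editor's note: the rx_align fix-up above, restated as a minimal sketch. The mask semantics come from this patch (fep->rx_align is 0xf or 0x3 depending on CONFIG_ARM, and 0x3f with FEC_QUIRK_HAS_AVB); demo_align_rx_data is an illustrative name, not a driver function.

#include <linux/skbuff.h>

static void demo_align_rx_data(struct sk_buff *skb, unsigned long align_mask)
{
	unsigned long off = (unsigned long)skb->data & align_mask;

	if (off)	/* bump skb->data up to the next aligned boundary */
		skb_reserve(skb, align_mask + 1 - off);
}
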
*/ - bdp = fec_enet_get_prevdesc(bdp, fep); + bdp = fec_enet_get_prevdesc(bdp, fep, queue); bdp->cbd_sc |= BD_SC_WRAP; return 0; @@ -2196,6 +2624,21 @@ static int fec_enet_alloc_buffers(struct net_device *ndev) return -ENOMEM; } +static int fec_enet_alloc_buffers(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned int i; + + for (i = 0; i < fep->num_rx_queues; i++) + if (fec_enet_alloc_rxq_buffers(ndev, i)) + return -ENOMEM; + + for (i = 0; i < fep->num_tx_queues; i++) + if (fec_enet_alloc_txq_buffers(ndev, i)) + return -ENOMEM; + return 0; +} + static int fec_enet_open(struct net_device *ndev) { @@ -2225,7 +2668,8 @@ fec_enet_open(struct net_device *ndev) fec_restart(ndev); napi_enable(&fep->napi); phy_start(fep->phy_dev); - netif_start_queue(ndev); + netif_tx_start_all_queues(ndev); + return 0; } @@ -2399,7 +2843,7 @@ static int fec_set_features(struct net_device *netdev, /* Resume the device after updates */ if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { fec_restart(netdev); - netif_wake_queue(netdev); + netif_tx_wake_all_queues(netdev); netif_tx_unlock_bh(netdev); napi_enable(&fep->napi); } @@ -2407,10 +2851,17 @@ static int fec_set_features(struct net_device *netdev, return 0; } +u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + return skb_tx_hash(ndev, skb); +} + static const struct net_device_ops fec_netdev_ops = { .ndo_open = fec_enet_open, .ndo_stop = fec_enet_close, .ndo_start_xmit = fec_enet_start_xmit, + .ndo_select_queue = fec_enet_select_queue, .ndo_set_rx_mode = set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, @@ -2432,39 +2883,38 @@ static int fec_enet_init(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); + struct fec_enet_priv_tx_q *txq; + struct fec_enet_priv_rx_q *rxq; struct bufdesc *cbd_base; + dma_addr_t bd_dma; int bd_size; + unsigned int i; - /* init the tx & rx ring size */ - fep->tx_ring_size = TX_RING_SIZE; - fep->rx_ring_size = RX_RING_SIZE; +#if defined(CONFIG_ARM) + fep->rx_align = 0xf; + fep->tx_align = 0xf; +#else + fep->rx_align = 0x3; + fep->tx_align = 0x3; +#endif - fep->tx_stop_threshold = FEC_MAX_SKB_DESCS; - fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2; + fec_enet_alloc_queue(ndev); if (fep->bufdesc_ex) fep->bufdesc_size = sizeof(struct bufdesc_ex); else fep->bufdesc_size = sizeof(struct bufdesc); - bd_size = (fep->tx_ring_size + fep->rx_ring_size) * + bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * fep->bufdesc_size; /* Allocate memory for buffer descriptors. */ - cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma, + cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, GFP_KERNEL); - if (!cbd_base) - return -ENOMEM; - - fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE, - &fep->tso_hdrs_dma, GFP_KERNEL); - if (!fep->tso_hdrs) { - dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma); + if (!cbd_base) { return -ENOMEM; } - memset(cbd_base, 0, PAGE_SIZE); - - fep->netdev = ndev; + memset(cbd_base, 0, bd_size); /* Get the Ethernet address */ fec_get_mac(ndev); @@ -2472,12 +2922,36 @@ static int fec_enet_init(struct net_device *ndev) fec_set_mac_address(ndev, NULL); /* Set receive and transmit descriptor base. 
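
Editor's note: fec_enet_init() below carves a single dma_alloc_coherent() block into per-queue RX and TX rings by advancing the CPU pointer and the DMA handle in lockstep. A minimal stand-alone sketch of that carving; the demo_* names and the two-word descriptor layout are assumptions:

struct demo_bd { unsigned int raw[2]; };

static void demo_carve_rings(struct demo_bd *cpu, unsigned long long dma,
			     struct demo_bd *base[],
			     unsigned long long dma_base[],
			     int nqueues, int ring_size)
{
	int q;

	for (q = 0; q < nqueues; q++) {
		base[q] = cpu;		/* CPU view of queue q's ring */
		dma_base[q] = dma;	/* device view, written to *_DES_START */
		cpu += ring_size;
		dma += (unsigned long long)ring_size * sizeof(*cpu);
	}
}
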
*/ - fep->rx_bd_base = cbd_base; - if (fep->bufdesc_ex) - fep->tx_bd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); - else - fep->tx_bd_base = cbd_base + fep->rx_ring_size; + for (i = 0; i < fep->num_rx_queues; i++) { + rxq = fep->rx_queue[i]; + rxq->index = i; + rxq->rx_bd_base = (struct bufdesc *)cbd_base; + rxq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; + cbd_base += rxq->rx_ring_size; + } + } + + for (i = 0; i < fep->num_tx_queues; i++) { + txq = fep->tx_queue[i]; + txq->index = i; + txq->tx_bd_base = (struct bufdesc *)cbd_base; + txq->bd_dma = bd_dma; + if (fep->bufdesc_ex) { + bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; + cbd_base = (struct bufdesc *) + (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); + } else { + bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; + cbd_base += txq->tx_ring_size; + } + } + /* The FEC Ethernet specific entries in the device structure */ ndev->watchdog_timeo = TX_TIMEOUT; @@ -2500,6 +2974,11 @@ static int fec_enet_init(struct net_device *ndev) fep->csum_flags |= FLAG_RX_CSUM_ENABLED; } + if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) { + fep->tx_align = 0; + fep->rx_align = 0x3f; + } + ndev->hw_features = ndev->features; fec_restart(ndev); @@ -2545,6 +3024,42 @@ static void fec_reset_phy(struct platform_device *pdev) } #endif /* CONFIG_OF */ +static void +fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) +{ + struct device_node *np = pdev->dev.of_node; + int err; + + *num_tx = *num_rx = 1; + + if (!np || !of_device_is_available(np)) + return; + + /* parse the num of tx and rx queues */ + err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx); + if (err) + *num_tx = 1; + + err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx); + if (err) + *num_rx = 1; + + if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { + dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", + *num_tx); + *num_tx = 1; + return; + } + + if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { + dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", + *num_rx); + *num_rx = 1; + return; + } + +} + static int fec_probe(struct platform_device *pdev) { @@ -2556,13 +3071,18 @@ fec_probe(struct platform_device *pdev) const struct of_device_id *of_id; static int dev_id; struct device_node *np = pdev->dev.of_node, *phy_node; + int num_tx_qs; + int num_rx_qs; of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) pdev->id_entry = of_id->data; + fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); + /* Init network device */ - ndev = alloc_etherdev(sizeof(struct fec_enet_private)); + ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), + num_tx_qs, num_rx_qs); if (!ndev) return -ENOMEM; @@ -2571,6 +3091,9 @@ fec_probe(struct platform_device *pdev) /* setup board info structure */ fep = netdev_priv(ndev); + fep->num_rx_queues = num_rx_qs; + fep->num_tx_queues = num_tx_qs; + #if !defined(CONFIG_M5272) /* default enable pause frame auto negotiation */ if (pdev->id_entry && @@ -2630,6 +3153,8 @@ fec_probe(struct platform_device *pdev) goto failed_clk; } + fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); + /* enet_out is optional, depends on board */ fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); if (IS_ERR(fep->clk_enet_out)) @@ -2637,6 +3162,12 @@ 
fec_probe(struct platform_device *pdev) fep->ptp_clk_on = false; mutex_init(&fep->ptp_clk_mutex); + + /* clk_ref is optional, depends on board */ + fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); + if (IS_ERR(fep->clk_ref)) + fep->clk_ref = NULL; + fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); fep->bufdesc_ex = pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; @@ -2684,6 +3215,7 @@ fec_probe(struct platform_device *pdev) goto failed_irq; } + init_completion(&fep->mdio_done); ret = fec_enet_mii_init(pdev); if (ret) goto failed_mii_init; diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index ed7916f6fbcf..76a6e0c77d69 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -1627,7 +1627,7 @@ static void hp100_clean_txring(struct net_device *dev) #endif /* Conversion to new PCI API : NOP */ pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE); - dev_kfree_skb_any(lp->txrhead->skb); + dev_consume_skb_any(lp->txrhead->skb); lp->txrhead->skb = NULL; lp->txrhead = lp->txrhead->next; lp->txrcommit--; @@ -1745,7 +1745,7 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb, hp100_ints_on(); spin_unlock_irqrestore(&lp->lock, flags); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); #ifdef HP100_DEBUG_TX printk("hp100: %s: start_xmit: end\n", dev->name); diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 10a0f221b183..69707108d23c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -148,16 +148,23 @@ struct e1000_adapter; /* wrapper around a pointer to a socket buffer, * so a DMA handle can be stored along with the buffer */ -struct e1000_buffer { +struct e1000_tx_buffer { struct sk_buff *skb; dma_addr_t dma; - struct page *page; unsigned long time_stamp; u16 length; u16 next_to_watch; - unsigned int segs; + bool mapped_as_page; + unsigned short segs; unsigned int bytecount; - u16 mapped_as_page; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; }; struct e1000_tx_ring { @@ -174,7 +181,7 @@ struct e1000_tx_ring { /* next descriptor to check for DD status bit */ unsigned int next_to_clean; /* array of buffer information structs */ - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; u16 tdh; u16 tdt; @@ -195,7 +202,7 @@ struct e1000_rx_ring { /* next descriptor to check for DD status bit */ unsigned int next_to_clean; /* array of buffer information structs */ - struct e1000_buffer *buffer_info; + struct e1000_rx_buffer *buffer_info; struct sk_buff *rx_skb_top; /* cpu for rx queue */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index cca5bca44e73..b691eb4f6376 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1,35 +1,30 @@ /******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ + * Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2006 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS <linux.nics@intel.com> + * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ /* ethtool support for e1000 */ #include "e1000.h" -#include <asm/uaccess.h> +#include <linux/uaccess.h> enum {NETDEV_STATS, E1000_STATS}; @@ -42,7 +37,7 @@ struct e1000_stats { #define E1000_STAT(m) E1000_STATS, \ sizeof(((struct e1000_adapter *)0)->m), \ - offsetof(struct e1000_adapter, m) + offsetof(struct e1000_adapter, m) #define E1000_NETDEV_STAT(m) NETDEV_STATS, \ sizeof(((struct net_device *)0)->m), \ offsetof(struct net_device, m) @@ -104,6 +99,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { "Interrupt test (offline)", "Loopback test (offline)", "Link test (on/offline)" }; + #define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) static int e1000_get_settings(struct net_device *netdev, @@ -113,7 +109,6 @@ static int e1000_get_settings(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; if (hw->media_type == e1000_media_type_copper) { - ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | @@ -155,9 +150,8 @@ static int e1000_get_settings(struct net_device *netdev, } if (er32(STATUS) & E1000_STATUS_LU) { - e1000_get_speed_and_duplex(hw, &adapter->link_speed, - &adapter->link_duplex); + &adapter->link_duplex); ethtool_cmd_speed_set(ecmd, adapter->link_speed); /* unfortunately FULL_DUPLEX != DUPLEX_FULL @@ -247,9 +241,9 @@ static int e1000_set_settings(struct net_device *netdev, if (netif_running(adapter->netdev)) { e1000_down(adapter); e1000_up(adapter); - } else + } else { e1000_reset(adapter); - + } clear_bit(__E1000_RESETTING, &adapter->flags); return 0; } @@ -279,11 +273,11 @@ static void e1000_get_pauseparam(struct net_device *netdev, pause->autoneg = (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); - if (hw->fc == E1000_FC_RX_PAUSE) + if (hw->fc == E1000_FC_RX_PAUSE) { pause->rx_pause = 1; - else if (hw->fc == E1000_FC_TX_PAUSE) + } else if (hw->fc == E1000_FC_TX_PAUSE) { pause->tx_pause = 1; - else if (hw->fc == E1000_FC_FULL) { + } else if (hw->fc == E1000_FC_FULL) { pause->rx_pause = 1; pause->tx_pause = 1; } @@ -316,8 +310,9 @@ static int e1000_set_pauseparam(struct net_device *netdev, if (netif_running(adapter->netdev)) { e1000_down(adapter); e1000_up(adapter); - } else + } else { e1000_reset(adapter); + } } else retval = ((hw->media_type == e1000_media_type_fiber) ? e1000_setup_link(hw) : e1000_force_mac_fc(hw)); @@ -329,12 +324,14 @@ static int e1000_set_pauseparam(struct net_device *netdev, static u32 e1000_get_msglevel(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; } static void e1000_set_msglevel(struct net_device *netdev, u32 data) { struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; } @@ -526,7 +523,7 @@ static int e1000_set_eeprom(struct net_device *netdev, * only the first byte of the word is being modified */ ret_val = e1000_read_eeprom(hw, last_word, 1, - &eeprom_buff[last_word - first_word]); + &eeprom_buff[last_word - first_word]); } /* Device's eeprom is always little-endian, word addressable */ @@ -618,13 +615,12 @@ static int e1000_set_ringparam(struct net_device *netdev, adapter->tx_ring = txdr; adapter->rx_ring = rxdr; - rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); - rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD)); rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); - - txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); - txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
E1000_MAX_TXD : E1000_MAX_82544_TXD)); txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); @@ -680,8 +676,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, u32 mask, u32 write) { struct e1000_hw *hw = &adapter->hw; - static const u32 test[] = - {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; u8 __iomem *address = hw->hw_addr + reg; u32 read; int i; @@ -793,8 +790,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); value = E1000_RAR_ENTRIES; for (i = 0; i < value; i++) { - REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, - 0xFFFFFFFF); + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); } } else { REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); @@ -877,7 +874,6 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Test each interrupt */ for (; i < 10; i++) { - /* Interrupt to test */ mask = 1 << i; @@ -972,10 +968,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter) if (rxdr->buffer_info[i].dma) dma_unmap_single(&pdev->dev, rxdr->buffer_info[i].dma, - rxdr->buffer_info[i].length, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); - if (rxdr->buffer_info[i].skb) - dev_kfree_skb(rxdr->buffer_info[i].skb); + kfree(rxdr->buffer_info[i].rxbuf.data); } } @@ -1010,7 +1005,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) if (!txdr->count) txdr->count = E1000_DEFAULT_TXD; - txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer), + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), GFP_KERNEL); if (!txdr->buffer_info) { ret_val = 1; @@ -1069,7 +1064,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) if (!rxdr->count) rxdr->count = E1000_DEFAULT_RXD; - rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), GFP_KERNEL); if (!rxdr->buffer_info) { ret_val = 5; @@ -1099,25 +1094,25 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) for (i = 0; i < rxdr->count; i++) { struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); - struct sk_buff *skb; + u8 *buf; - skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); - if (!skb) { + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { ret_val = 7; goto err_nomem; } - skb_reserve(skb, NET_IP_ALIGN); - rxdr->buffer_info[i].skb = skb; - rxdr->buffer_info[i].length = E1000_RXBUFFER_2048; + rxdr->buffer_info[i].rxbuf.data = buf; + rxdr->buffer_info[i].dma = - dma_map_single(&pdev->dev, skb->data, + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, E1000_RXBUFFER_2048, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { ret_val = 8; goto err_nomem; } rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); - memset(skb->data, 0x00, skb->len); } return 0; @@ -1149,8 +1144,7 @@ static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) */ e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); phy_reg |= M88E1000_EPSCR_TX_CLK_25; - e1000_write_phy_reg(hw, - M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); /* In addition, because of the s/w reset above, we need to enable * CRS on TX. 
This must be set for both full and half duplex @@ -1158,8 +1152,7 @@ static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) */ e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; - e1000_write_phy_reg(hw, - M88E1000_PHY_SPEC_CTRL, phy_reg); + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); } static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) @@ -1216,7 +1209,7 @@ static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) /* Check Phy Configuration */ e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); if (phy_reg != 0x4100) - return 9; + return 9; e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); if (phy_reg != 0x0070) @@ -1261,7 +1254,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) E1000_CTRL_FD); /* Force Duplex to FULL */ if (hw->media_type == e1000_media_type_copper && - hw->phy_type == e1000_phy_m88) + hw->phy_type == e1000_phy_m88) ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ else { /* Set the ILOS bit on the fiber Nic is half @@ -1299,7 +1292,7 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter) * attempt this 10 times. */ while (e1000_nonintegrated_phy_loopback(adapter) && - count++ < 10); + count++ < 10); if (count < 11) return 0; } @@ -1348,8 +1341,9 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter) ew32(RCTL, rctl); return 0; } - } else if (hw->media_type == e1000_media_type_copper) + } else if (hw->media_type == e1000_media_type_copper) { return e1000_set_phy_loopback(adapter); + } return 7; } @@ -1391,13 +1385,13 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb, memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); } -static int e1000_check_lbtest_frame(struct sk_buff *skb, +static int e1000_check_lbtest_frame(const unsigned char *data, unsigned int frame_size) { frame_size &= ~1; - if (*(skb->data + 3) == 0xFF) { - if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && - (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { return 0; } } @@ -1410,7 +1404,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) struct e1000_tx_ring *txdr = &adapter->test_tx_ring; struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; - int i, j, k, l, lc, good_cnt, ret_val=0; + int i, j, k, l, lc, good_cnt, ret_val = 0; unsigned long time; ew32(RDT, rxdr->count - 1); @@ -1429,12 +1423,13 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) for (j = 0; j <= lc; j++) { /* loop count loop */ for (i = 0; i < 64; i++) { /* send the packets */ e1000_create_lbtest_frame(txdr->buffer_info[i].skb, - 1024); + 1024); dma_sync_single_for_device(&pdev->dev, txdr->buffer_info[k].dma, txdr->buffer_info[k].length, DMA_TO_DEVICE); - if (unlikely(++k == txdr->count)) k = 0; + if (unlikely(++k == txdr->count)) + k = 0; } ew32(TDT, k); E1000_WRITE_FLUSH(); @@ -1444,15 +1439,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) do { /* receive the sent packets */ dma_sync_single_for_cpu(&pdev->dev, rxdr->buffer_info[l].dma, - rxdr->buffer_info[l].length, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); ret_val = e1000_check_lbtest_frame( - rxdr->buffer_info[l].skb, + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, 1024); if (!ret_val) good_cnt++; - if (unlikely(++l == rxdr->count)) l = 0; + if (unlikely(++l == 
rxdr->count)) + l = 0; /* time + 20 msecs (200 msecs on 2.4) is more than * enough time to complete the receives, if it's * exceeded, break and error off @@ -1494,6 +1491,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) *data = 0; if (hw->media_type == e1000_media_type_internal_serdes) { int i = 0; + hw->serdes_has_link = false; /* On some blade server designs, link establishment @@ -1512,9 +1510,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) if (hw->autoneg) /* if auto_neg is set wait for it */ msleep(4000); - if (!(er32(STATUS) & E1000_STATUS_LU)) { + if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; - } } return *data; } @@ -1665,8 +1662,7 @@ static void e1000_get_wol(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC; + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; /* this function will set ->supported = 0 and return 1 if wol is not @@ -1819,6 +1815,7 @@ static int e1000_set_coalesce(struct net_device *netdev, static int e1000_nway_reset(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) e1000_reinit_locked(adapter); return 0; @@ -1830,22 +1827,29 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); int i; char *p = NULL; + const struct e1000_stats *stat = e1000_gstrings_stats; e1000_update_stats(adapter); for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { - switch (e1000_gstrings_stats[i].type) { + switch (stat->type) { case NETDEV_STATS: - p = (char *) netdev + - e1000_gstrings_stats[i].stat_offset; + p = (char *)netdev + stat->stat_offset; break; case E1000_STATS: - p = (char *) adapter + - e1000_gstrings_stats[i].stat_offset; + p = (char *)adapter + stat->stat_offset; + break; + default: + WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); break; } - data[i] = (e1000_gstrings_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + + stat++; } /* BUG_ON(i != E1000_STATS_LEN); */ } @@ -1858,8 +1862,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: - memcpy(data, *e1000_gstrings_test, - sizeof(e1000_gstrings_test)); + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); break; case ETH_SS_STATS: for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 1acf5034db10..45c8c864104e 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -4837,84 +4837,6 @@ void e1000_update_adaptive(struct e1000_hw *hw) } /** - * e1000_tbi_adjust_stats - * @hw: Struct containing variables accessed by shared code - * @frame_len: The length of the frame in question - * @mac_addr: The Ethernet destination address of the frame in question - * - * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT - */ -void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, - u32 frame_len, u8 *mac_addr) -{ - u64 carry_bit; - - /* First adjust the frame length. 
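
Editor's note: e1000_tbi_adjust_stats(), removed in the block below, is not deleted but moved -- it reappears as a static helper in e1000_main.c later in this patch. Its gorcl/gorch update detects a 32-bit counter carry without 64-bit arithmetic. An isolated sketch of that trick (demo_* names assumed; it is valid here because frame_len is far below 2^31):

#include <stdint.h>

static void demo_add32_with_carry(uint32_t *lo, uint32_t *hi, uint32_t add)
{
	uint32_t top_before = *lo & 0x80000000u;

	*lo += add;
	/* top bit fell from 1 to 0 => the carry out of bit 31 was lost */
	if (top_before && !(*lo & 0x80000000u))
		(*hi)++;
}
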
*/ - frame_len--; - /* We need to adjust the statistics counters, since the hardware - * counters overcount this packet as a CRC error and undercount - * the packet as a good packet - */ - /* This packet should not be counted as a CRC error. */ - stats->crcerrs--; - /* This packet does count as a Good Packet Received. */ - stats->gprc++; - - /* Adjust the Good Octets received counters */ - carry_bit = 0x80000000 & stats->gorcl; - stats->gorcl += frame_len; - /* If the high bit of Gorcl (the low 32 bits of the Good Octets - * Received Count) was one before the addition, - * AND it is zero after, then we lost the carry out, - * need to add one to Gorch (Good Octets Received Count High). - * This could be simplified if all environments supported - * 64-bit integers. - */ - if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) - stats->gorch++; - /* Is this a broadcast or multicast? Check broadcast first, - * since the test for a multicast frame will test positive on - * a broadcast frame. - */ - if (is_broadcast_ether_addr(mac_addr)) - /* Broadcast packet */ - stats->bprc++; - else if (is_multicast_ether_addr(mac_addr)) - /* Multicast packet */ - stats->mprc++; - - if (frame_len == hw->max_frame_size) { - /* In this case, the hardware has overcounted the number of - * oversize frames. - */ - if (stats->roc > 0) - stats->roc--; - } - - /* Adjust the bin counters when the extra byte put the frame in the - * wrong bin. Remember that the frame_len was adjusted above. - */ - if (frame_len == 64) { - stats->prc64++; - stats->prc127--; - } else if (frame_len == 127) { - stats->prc127++; - stats->prc255--; - } else if (frame_len == 255) { - stats->prc255++; - stats->prc511--; - } else if (frame_len == 511) { - stats->prc511++; - stats->prc1023--; - } else if (frame_len == 1023) { - stats->prc1023++; - stats->prc1522--; - } else if (frame_len == 1522) { - stats->prc1522++; - } -} - -/** * e1000_get_bus_info * @hw: Struct containing variables accessed by shared code * diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h index 11578c8978db..5cf7268cc4e1 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.h +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -393,8 +393,6 @@ s32 e1000_blink_led_start(struct e1000_hw *hw); /* Everything else */ void e1000_reset_adaptive(struct e1000_hw *hw); void e1000_update_adaptive(struct e1000_hw *hw); -void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, - u32 frame_len, u8 * mac_addr); void e1000_get_bus_info(struct e1000_hw *hw); void e1000_pci_set_mwi(struct e1000_hw *hw); void e1000_pci_clear_mwi(struct e1000_hw *hw); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index ad3d5d12173f..5f6aded512f5 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1497,7 +1497,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter, struct pci_dev *pdev = adapter->pdev; int size; - size = sizeof(struct e1000_buffer) * txdr->count; + size = sizeof(struct e1000_tx_buffer) * txdr->count; txdr->buffer_info = vzalloc(size); if (!txdr->buffer_info) return -ENOMEM; @@ -1687,7 +1687,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter, struct pci_dev *pdev = adapter->pdev; int size, desc_len; - size = sizeof(struct e1000_buffer) * rxdr->count; + size = sizeof(struct e1000_rx_buffer) * rxdr->count; rxdr->buffer_info = vzalloc(size); if (!rxdr->buffer_info) 
return -ENOMEM; @@ -1947,8 +1947,9 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter) e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); } -static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, - struct e1000_buffer *buffer_info) +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) { if (buffer_info->dma) { if (buffer_info->mapped_as_page) @@ -1977,7 +1978,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring) { struct e1000_hw *hw = &adapter->hw; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned long size; unsigned int i; @@ -1989,7 +1990,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, } netdev_reset_queue(adapter->netdev); - size = sizeof(struct e1000_buffer) * tx_ring->count; + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ @@ -2053,6 +2054,28 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter) e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); } +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +static void e1000_free_frag(const void *data) +{ + put_page(virt_to_head_page(data)); +} + /** * e1000_clean_rx_ring - Free Rx Buffers per Queue * @adapter: board private structure @@ -2062,44 +2085,42 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring) { struct e1000_hw *hw = &adapter->hw; - struct e1000_buffer *buffer_info; + struct e1000_rx_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; - /* Free all the Rx ring sk_buffs */ + /* Free all the Rx netfrags */ for (i = 0; i < rx_ring->count; i++) { buffer_info = &rx_ring->buffer_info[i]; - if (buffer_info->dma && - adapter->clean_rx == e1000_clean_rx_irq) { - dma_unmap_single(&pdev->dev, buffer_info->dma, - buffer_info->length, - DMA_FROM_DEVICE); - } else if (buffer_info->dma && - adapter->clean_rx == e1000_clean_jumbo_rx_irq) { - dma_unmap_page(&pdev->dev, buffer_info->dma, - buffer_info->length, - DMA_FROM_DEVICE); + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + e1000_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } } buffer_info->dma = 0; - if (buffer_info->page) { - put_page(buffer_info->page); - buffer_info->page = NULL; - } - if (buffer_info->skb) { - dev_kfree_skb(buffer_info->skb); - buffer_info->skb = NULL; - } } /* there also may be some cached data from a chained receive */ - if (rx_ring->rx_skb_top) { - dev_kfree_skb(rx_ring->rx_skb_top); - rx_ring->rx_skb_top = NULL; - } + 
napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; - size = sizeof(struct e1000_buffer) * rx_ring->count; + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; memset(rx_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ @@ -2678,7 +2699,7 @@ static int e1000_tso(struct e1000_adapter *adapter, __be16 protocol) { struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned int i; u32 cmd_length = 0; u16 ipcse = 0, tucse, mss; @@ -2750,7 +2771,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, __be16 protocol) { struct e1000_context_desc *context_desc; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned int i; u8 css; u32 cmd_len = E1000_TXD_CMD_DEXT; @@ -2809,7 +2830,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, { struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned int len = skb_headlen(skb); unsigned int offset = 0, size, count = 0, i; unsigned int f, bytecount, segs; @@ -2955,7 +2976,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, { struct e1000_hw *hw = &adapter->hw; struct e1000_tx_desc *tx_desc = NULL; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; unsigned int i; @@ -3373,7 +3394,7 @@ static void e1000_dump(struct e1000_adapter *adapter) for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); - struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; struct my_u { __le64 a; __le64 b; }; struct my_u *u = (struct my_u *)tx_desc; const char *type; @@ -3415,7 +3436,7 @@ rx_ring_summary: for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); - struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; struct my_u { __le64 a; __le64 b; }; struct my_u *u = (struct my_u *)rx_desc; const char *type; @@ -3429,7 +3450,7 @@ rx_ring_summary: pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", i, le64_to_cpu(u->a), le64_to_cpu(u->b), - (u64)buffer_info->dma, buffer_info->skb, type); + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); } /* for */ /* dump the descriptor caches */ @@ -3811,7 +3832,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct e1000_tx_desc *tx_desc, *eop_desc; - struct e1000_buffer *buffer_info; + struct e1000_tx_buffer *buffer_info; unsigned int i, eop; unsigned int count = 0; unsigned int total_tx_bytes=0, total_tx_packets=0; @@ -3949,12 +3970,12 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, } /** - * e1000_consume_page - helper function + * e1000_consume_page - helper function for jumbo Rx path **/ -static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, u16 length) { - bi->page = NULL; + bi->rxbuf.page = NULL; skb->len += length; skb->data_len += length; skb->truesize += PAGE_SIZE; @@ -3981,6 +4002,113 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, } /** + * e1000_tbi_adjust_stats + * @hw: Struct 
containing variables accessed by shared code + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
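
Editor's note: the chained prcNN++ / prcNN-- fix-ups just below can be read as a threshold table: after frame_len is shortened by one, a frame sitting exactly on a bin edge was counted one bin too high by hardware, so one count moves down. A compact sketch with assumed names (prc[0..5] maps to prc64..prc1522):

#include <stdint.h>

static const uint32_t demo_bin_edge[] = { 64, 127, 255, 511, 1023, 1522 };

static void demo_fixup_bins(uint64_t prc[6], uint32_t frame_len)
{
	int i;

	for (i = 0; i < 6; i++) {
		if (frame_len != demo_bin_edge[i])
			continue;
		prc[i]++;		/* belongs in the edge bin */
		if (i < 5)
			prc[i + 1]--;	/* hardware counted it one bin up */
		break;
	}
}
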
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = netdev_alloc_skb_ip_align(adapter->netdev, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy * @adapter: board private structure * @rx_ring: ring to clean @@ -3994,12 +4122,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do) { - struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_rx_desc *rx_desc, *next_rxd; - struct e1000_buffer *buffer_info, *next_buffer; - unsigned long irq_flags; + struct e1000_rx_buffer *buffer_info, *next_buffer; u32 length; unsigned int i; int cleaned_count = 0; @@ -4020,8 +4146,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; if (++i == rx_ring->count) i = 0; next_rxd = E1000_RX_DESC(*rx_ring, i); @@ -4032,7 +4156,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, cleaned = true; cleaned_count++; dma_unmap_page(&pdev->dev, buffer_info->dma, - buffer_info->length, DMA_FROM_DEVICE); + adapter->rx_buffer_len, DMA_FROM_DEVICE); buffer_info->dma = 0; length = le16_to_cpu(rx_desc->length); @@ -4040,25 +4164,15 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* errors is only valid for DD + EOP descriptors */ if (unlikely((status & E1000_RXD_STAT_EOP) && (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { - u8 *mapped; - u8 last_byte; - - mapped = page_address(buffer_info->page); - last_byte = *(mapped + length - 1); - if (TBI_ACCEPT(hw, status, rx_desc->errors, length, - last_byte)) { - spin_lock_irqsave(&adapter->stats_lock, - irq_flags); - e1000_tbi_adjust_stats(hw, &adapter->stats, - length, mapped); - spin_unlock_irqrestore(&adapter->stats_lock, - irq_flags); + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; } else { - if (netdev->features & NETIF_F_RXALL) - goto process_skb; - /* recycle both page and skb */ - buffer_info->skb = skb; /* an error means any chain goes out the window * too */ @@ -4075,16 +4189,18 @@ process_skb: /* this descriptor is only the beginning (or middle) */ 
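
Editor's note: the jumbo RX rework below leans on the copybreak heuristic: short frames are copied into a small freshly allocated skb so the receive page can be recycled immediately, while long frames are attached zero-copy via skb_fill_page_desc(). A minimal stand-alone sketch of just the decision; the demo_* names and the 256-byte threshold are assumptions, not the module parameter's actual default.

#include <stdbool.h>
#include <string.h>

static unsigned int demo_copybreak = 256;	/* assumed threshold */

static bool demo_try_copybreak(void *small_buf, const void *page_data,
			       unsigned int len)
{
	if (len > demo_copybreak)
		return false;		/* zero-copy: hand the page up */

	memcpy(small_buf, page_data, len);	/* copy, recycle the page */
	return true;
}
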
if (!rxtop) { /* this is the beginning of a chain */ - rxtop = skb; - skb_fill_page_desc(rxtop, 0, buffer_info->page, + rxtop = napi_get_frags(&adapter->napi); + if (!rxtop) + break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, 0, length); } else { /* this is the middle of a chain */ skb_fill_page_desc(rxtop, skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the skb, only consumed the page */ - buffer_info->skb = skb; + buffer_info->rxbuf.page, 0, length); } e1000_consume_page(buffer_info, rxtop, length); goto next_desc; @@ -4093,32 +4209,51 @@ process_skb: /* end of the chain */ skb_fill_page_desc(rxtop, skb_shinfo(rxtop)->nr_frags, - buffer_info->page, 0, length); - /* re-use the current skb, we only consumed the - * page - */ - buffer_info->skb = skb; + buffer_info->rxbuf.page, 0, length); skb = rxtop; rxtop = NULL; e1000_consume_page(buffer_info, skb, length); } else { + struct page *p; /* no chain, got EOP, this buf is the packet * copybreak to save the put_page/alloc_page */ - if (length <= copybreak && - skb_tailroom(skb) >= length) { + p = buffer_info->rxbuf.page; + if (length <= copybreak) { u8 *vaddr; - vaddr = kmap_atomic(buffer_info->page); + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); memcpy(skb_tail_pointer(skb), vaddr, length); kunmap_atomic(vaddr); /* re-use the page, so don't erase - * buffer_info->page + * buffer_info->rxbuf.page */ skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; } else { - skb_fill_page_desc(skb, 0, - buffer_info->page, 0, + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, length); e1000_consume_page(buffer_info, skb, length); @@ -4137,14 +4272,14 @@ process_skb: pskb_trim(skb, skb->len - 4); total_rx_packets++; - /* eth type trans needs skb->data to point to something */ - if (!pskb_may_pull(skb, ETH_HLEN)) { - e_err(drv, "pskb_may_pull failed.\n"); - dev_kfree_skb(skb); - goto next_desc; + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } - e1000_receive_skb(adapter, status, rx_desc->special, skb); + napi_gro_frags(&adapter->napi); next_desc: rx_desc->status = 0; @@ -4175,25 +4310,25 @@ next_desc: /* this should improve performance for small packets with large amounts * of reassembly being done in the stack */ -static void e1000_check_copybreak(struct net_device *netdev, - struct e1000_buffer *buffer_info, - u32 length, struct sk_buff **skb) +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) { - struct sk_buff *new_skb; + struct sk_buff *skb; if (length > copybreak) - return; + return NULL; - new_skb = netdev_alloc_skb_ip_align(netdev, length); - if (!new_skb) - return; + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); - skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, - (*skb)->data - NET_IP_ALIGN, - length + NET_IP_ALIGN); - /* save the skb in buffer_info as good 
*/ - buffer_info->skb = *skb; - *skb = new_skb; + memcpy(skb_put(skb, length), data, length); + + return skb; } /** @@ -4207,12 +4342,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do) { - struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_rx_desc *rx_desc, *next_rxd; - struct e1000_buffer *buffer_info, *next_buffer; - unsigned long flags; + struct e1000_rx_buffer *buffer_info, *next_buffer; u32 length; unsigned int i; int cleaned_count = 0; @@ -4225,6 +4358,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, while (rx_desc->status & E1000_RXD_STAT_DD) { struct sk_buff *skb; + u8 *data; u8 status; if (*work_done >= work_to_do) break; (*work_done)++; rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; - skb = buffer_info->skb; - buffer_info->skb = NULL; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } - prefetch(skb->data - NET_IP_ALIGN); + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } if (++i == rx_ring->count) i = 0; next_rxd = E1000_RX_DESC(*rx_ring, i); @@ -4246,11 +4397,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, cleaned = true; cleaned_count++; - dma_unmap_single(&pdev->dev, buffer_info->dma, - buffer_info->length, DMA_FROM_DEVICE); - buffer_info->dma = 0; - length = le16_to_cpu(rx_desc->length); /* !EOP means multiple descriptors were used to store a single * packet, if that's the case we need to toss it. 
In fact, we have * to toss every packet with the EOP bit clear and the next @@ -4262,29 +4409,22 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (adapter->discarding) { /* All receives must fit into a single buffer */ - e_dbg("Receive packet consumed multiple buffers\n"); - /* recycle */ - buffer_info->skb = skb; + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + dev_kfree_skb(skb); if (status & E1000_RXD_STAT_EOP) adapter->discarding = false; goto next_desc; } if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { - u8 last_byte = *(skb->data + length - 1); - if (TBI_ACCEPT(hw, status, rx_desc->errors, length, - last_byte)) { - spin_lock_irqsave(&adapter->stats_lock, flags); - e1000_tbi_adjust_stats(hw, &adapter->stats, - length, skb->data); - spin_unlock_irqrestore(&adapter->stats_lock, - flags); + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; } else { - if (netdev->features & NETIF_F_RXALL) - goto process_skb; - /* recycle */ - buffer_info->skb = skb; + dev_kfree_skb(skb); goto next_desc; } } @@ -4299,9 +4439,10 @@ process_skb: */ length -= 4; - e1000_check_copybreak(netdev, buffer_info, length, &skb); - - skb_put(skb, length); + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); /* Receive Checksum Offload */ e1000_rx_checksum(adapter, @@ -4347,38 +4488,19 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, struct e1000_rx_ring *rx_ring, int cleaned_count) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_rx_desc *rx_desc; - struct e1000_buffer *buffer_info; - struct sk_buff *skb; + struct e1000_rx_buffer *buffer_info; unsigned int i; - unsigned int bufsz = 256 - 16 /*for skb_reserve */ ; i = rx_ring->next_to_use; buffer_info = &rx_ring->buffer_info[i]; while (cleaned_count--) { - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto check_page; - } - - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - if (unlikely(!skb)) { - /* Better luck next round */ - adapter->alloc_rx_buff_failed++; - break; - } - - buffer_info->skb = skb; - buffer_info->length = adapter->rx_buffer_len; -check_page: /* allocate a new page if necessary */ - if (!buffer_info->page) { - buffer_info->page = alloc_page(GFP_ATOMIC); - if (unlikely(!buffer_info->page)) { + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { adapter->alloc_rx_buff_failed++; break; } @@ -4386,17 +4508,15 @@ check_page: if (!buffer_info->dma) { buffer_info->dma = dma_map_page(&pdev->dev, - buffer_info->page, 0, - buffer_info->length, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - put_page(buffer_info->page); - dev_kfree_skb(skb); - buffer_info->page = NULL; - buffer_info->skb = NULL; + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; buffer_info->dma = 0; adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ + break; } } @@ -4432,11 +4552,9 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, int cleaned_count) { struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_rx_desc *rx_desc; - struct e1000_buffer *buffer_info; - struct sk_buff *skb; + struct 
e1000_rx_buffer *buffer_info; unsigned int i; unsigned int bufsz = adapter->rx_buffer_len; @@ -4444,57 +4562,52 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, buffer_info = &rx_ring->buffer_info[i]; while (cleaned_count--) { - skb = buffer_info->skb; - if (skb) { - skb_trim(skb, 0); - goto map_skb; - } + void *data; - skb = netdev_alloc_skb_ip_align(netdev, bufsz); - if (unlikely(!skb)) { + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { /* Better luck next round */ adapter->alloc_rx_buff_failed++; break; } /* Fix for errata 23, can't cross 64kB boundary */ - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { - struct sk_buff *oldskb = skb; + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; e_err(rx_err, "skb align check failed: %u bytes at " - "%p\n", bufsz, skb->data); + "%p\n", bufsz, data); /* Try again, without freeing the previous */ - skb = netdev_alloc_skb_ip_align(netdev, bufsz); + data = e1000_alloc_frag(adapter); /* Failed allocation, critical failure */ - if (!skb) { - dev_kfree_skb(oldskb); + if (!data) { + e1000_free_frag(olddata); adapter->alloc_rx_buff_failed++; break; } - if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + if (!e1000_check_64k_bound(adapter, data, bufsz)) { /* give up */ - dev_kfree_skb(skb); - dev_kfree_skb(oldskb); + e1000_free_frag(data); + e1000_free_frag(olddata); adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ + break; } /* Use new allocation */ - dev_kfree_skb(oldskb); + e1000_free_frag(olddata); } - buffer_info->skb = skb; - buffer_info->length = adapter->rx_buffer_len; -map_skb: buffer_info->dma = dma_map_single(&pdev->dev, - skb->data, - buffer_info->length, + data, + adapter->rx_buffer_len, DMA_FROM_DEVICE); if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { - dev_kfree_skb(skb); - buffer_info->skb = NULL; + e1000_free_frag(data); buffer_info->dma = 0; adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ + break; } /* XXX if it was allocated cleanly it will never map to a @@ -4508,17 +4621,20 @@ map_skb: e_err(rx_err, "dma align check failed: %u bytes at " "%p\n", adapter->rx_buffer_len, (void *)(unsigned long)buffer_info->dma); - dev_kfree_skb(skb); - buffer_info->skb = NULL; dma_unmap_single(&pdev->dev, buffer_info->dma, adapter->rx_buffer_len, DMA_FROM_DEVICE); + + e1000_free_frag(data); + buffer_info->rxbuf.data = NULL; buffer_info->dma = 0; adapter->alloc_rx_buff_failed++; - break; /* while !buffer_info->skb */ + break; } + buffer_info->rxbuf.data = data; + skip: rx_desc = E1000_RX_DESC(*rx_ring, i); rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 801da392a20e..f1e33f896439 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -144,6 +144,8 @@ enum i40e_state_t { __I40E_PTP_TX_IN_PROGRESS, __I40E_BAD_EEPROM, __I40E_DOWN_REQUESTED, + __I40E_FD_FLUSH_REQUESTED, + __I40E_RESET_FAILED, }; enum i40e_interrupt_policy { @@ -250,6 +252,11 @@ struct i40e_pf { u16 fdir_pf_active_filters; u16 fd_sb_cnt_idx; u16 fd_atr_cnt_idx; + unsigned long fd_flush_timestamp; + u32 fd_flush_cnt; + u32 fd_add_err; + u32 fd_atr_cnt; + u32 fd_tcp_rule; #ifdef CONFIG_I40E_VXLAN __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; @@ -310,6 +317,7 @@ struct i40e_pf { u32 tx_timeout_count; u32 tx_timeout_recovery_level; unsigned long tx_timeout_last_recovery; + u32 
tx_sluggish_count; u32 hw_csum_rx_error; u32 led_status; u16 corer_count; /* Core reset count */ @@ -608,6 +616,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, void i40e_fdir_check_and_reenable(struct i40e_pf *pf); int i40e_get_current_fd_count(struct i40e_pf *pf); int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf); +int i40e_get_current_atr_cnt(struct i40e_pf *pf); bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); void i40e_set_ethtool_ops(struct net_device *netdev); struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index b29c157b1f57..72f5d25a222f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -840,7 +840,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, /* bump the tail */ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); - i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); (hw->aq.asq.next_to_use)++; if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; @@ -891,7 +892,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); - i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size); /* update the error if time out occurred */ if ((!cmd_completed) && @@ -987,7 +988,8 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, e->msg_size); i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); - i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf); + i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index df43e7c6777c..30056b25d94e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -75,13 +75,15 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) * @mask: debug mask * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer + * @buf_len: max length of buffer * * Dumps debug log about adminq command with descriptor contents. 
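The extra buf_len parameter threaded through i40e_debug_aq() in the hunks above exists because the firmware-written datalen cannot be trusted as a bound on the caller's buffer. A minimal user-space sketch of the clamp (the function and names here are hypothetical, not the driver's API):

#include <stdio.h>
#include <stdint.h>

/* Dump at most buf_len bytes even if the descriptor claims more;
 * mirrors the min(datalen, buf_len) clamp added in the patch.
 */
static void dump_aq_buffer(const uint8_t *buf, uint16_t datalen,
			   uint16_t buf_len)
{
	uint16_t len = datalen;
	uint16_t i;

	if (buf_len < len)
		len = buf_len;	/* never trust datalen past the allocation */

	for (i = 0; i < len; i++)
		printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
	if (len % 16)
		putchar('\n');
}

int main(void)
{
	uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	dump_aq_buffer(buf, 64 /* bogus fw datalen */, sizeof(buf));
	return 0;
}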
**/ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, - void *buffer) + void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u16 len = le16_to_cpu(aq_desc->datalen); u8 *aq_buffer = (u8 *)buffer; u32 data[4]; u32 i = 0; @@ -105,7 +107,9 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if ((buffer != NULL) && (aq_desc->datalen != 0)) { memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); - for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) { + if (buf_len < len) + len = buf_len; + for (i = 0; i < len; i++) { data[((i % 16) / 4)] |= ((u32)aq_buffer[i]) << (8 * (i % 4)); if ((i % 16) == 15) { @@ -748,6 +752,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) switch (hw->phy.link_info.phy_type) { case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: case I40E_PHY_TYPE_40GBASE_SR4: case I40E_PHY_TYPE_40GBASE_LR4: media = I40E_MEDIA_TYPE_FIBER; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 5a0cabeb35ed..7067f4b9159c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1356,6 +1356,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, "emp reset count: %d\n", pf->empr_count); dev_info(&pf->pdev->dev, "pf reset count: %d\n", pf->pfr_count); + dev_info(&pf->pdev->dev, + "pf tx sluggish count: %d\n", + pf->tx_sluggish_count); } else if (strncmp(&cmd_buf[5], "port", 4) == 0) { struct i40e_aqc_query_port_ets_config_resp *bw_data; struct i40e_dcbx_config *cfg = diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index e8ba7470700a..1dda467ae1ac 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -145,6 +145,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_jabber", stats.rx_jabber), I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), @@ -312,7 +313,10 @@ static int i40e_get_settings(struct net_device *netdev, break; case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: + case I40E_PHY_TYPE_1000BASE_SX: + case I40E_PHY_TYPE_1000BASE_LX: ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->supported |= SUPPORTED_1000baseT_Full; break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: @@ -351,7 +355,8 @@ static int i40e_get_settings(struct net_device *netdev, break; default: /* if we got here and link is up something bad is afoot */ - WARN_ON(link_up); + netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n", + hw_link_info->phy_type); } no_valid_phy_type: @@ -461,7 +466,8 @@ static int i40e_set_settings(struct net_device *netdev, if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && - hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE) + hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && + hw->phy.link_info.link_info & I40E_AQ_LINK_UP) return -EOPNOTSUPP; /* get our own copy of the bits to check against */ @@ -492,11 +498,10 @@ static int i40e_set_settings(struct net_device *netdev, if (status) return -EAGAIN; - 
/* Copy link_speed and abilities to config in case they are not + /* Copy abilities to config in case autoneg is not * set below */ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config)); - config.link_speed = abilities.link_speed; config.abilities = abilities.abilities; /* Check autoneg */ @@ -533,42 +538,38 @@ static int i40e_set_settings(struct net_device *netdev, return -EINVAL; if (advertise & ADVERTISED_100baseT_Full) - if (!(abilities.link_speed & I40E_LINK_SPEED_100MB)) { - config.link_speed |= I40E_LINK_SPEED_100MB; - change = true; - } + config.link_speed |= I40E_LINK_SPEED_100MB; if (advertise & ADVERTISED_1000baseT_Full || advertise & ADVERTISED_1000baseKX_Full) - if (!(abilities.link_speed & I40E_LINK_SPEED_1GB)) { - config.link_speed |= I40E_LINK_SPEED_1GB; - change = true; - } + config.link_speed |= I40E_LINK_SPEED_1GB; if (advertise & ADVERTISED_10000baseT_Full || advertise & ADVERTISED_10000baseKX4_Full || advertise & ADVERTISED_10000baseKR_Full) - if (!(abilities.link_speed & I40E_LINK_SPEED_10GB)) { - config.link_speed |= I40E_LINK_SPEED_10GB; - change = true; - } + config.link_speed |= I40E_LINK_SPEED_10GB; if (advertise & ADVERTISED_40000baseKR4_Full || advertise & ADVERTISED_40000baseCR4_Full || advertise & ADVERTISED_40000baseSR4_Full || advertise & ADVERTISED_40000baseLR4_Full) - if (!(abilities.link_speed & I40E_LINK_SPEED_40GB)) { - config.link_speed |= I40E_LINK_SPEED_40GB; - change = true; - } + config.link_speed |= I40E_LINK_SPEED_40GB; - if (change) { + if (change || (abilities.link_speed != config.link_speed)) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; - /* If link is up set link and an so changes take effect */ - if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) - config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* set link and auto negotiation so changes take effect */ + config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + /* If link is up put link down */ + if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) { + /* Tell the OS link is going down, the link will go + * back up when fw says it is ready asynchronously + */ + netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } /* make the aq call */ status = i40e_aq_set_phy_config(hw, &config, NULL); @@ -685,6 +686,13 @@ static int i40e_set_pauseparam(struct net_device *netdev, else return -EINVAL; + /* Tell the OS link is going down, the link will go back up when fw + * says it is ready asynchronously + */ + netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n"); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + /* Set the fc mode and only restart an if link is up*/ status = i40e_set_fc(hw, &aq_failures, link_up); @@ -1977,6 +1985,13 @@ static int i40e_del_fdir_entry(struct i40e_vsi *vsi, struct i40e_pf *pf = vsi->back; int ret = 0; + if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return -EBUSY; + ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); i40e_fdir_check_and_reenable(pf); @@ -2010,6 +2025,13 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) return -ENOSPC; + if 
(test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + return -EBUSY; + + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return -EBUSY; + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index eddec6ba095b..ed5f1c15fb0f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -37,9 +37,9 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" -#define DRV_VERSION_MAJOR 0 -#define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 21 +#define DRV_VERSION_MAJOR 1 +#define DRV_VERSION_MINOR 0 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -1239,8 +1239,11 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM * @vsi: the PF Main VSI - inappropriate for any other VSI * @macaddr: the MAC address + * + * Some older firmware configurations set up a default promiscuous VLAN + * filter that needs to be removed. **/ -static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) +static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; @@ -1248,15 +1251,18 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) - return; + return -EINVAL; + memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); if (aq_ret) - dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n"); + return -ENOENT; + + return 0; } /** @@ -1385,18 +1391,30 @@ static int i40e_set_mac(struct net_device *netdev, void *p) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; struct sockaddr *addr = p; struct i40e_mac_filter *f; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - netdev_info(netdev, "set mac address=%pM\n", addr->sa_data); + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", + addr->sa_data); + return 0; + } if (test_bit(__I40E_DOWN, &vsi->back->state) || test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state)) return -EADDRNOTAVAIL; + if (ether_addr_equal(hw->mac.addr, addr->sa_data)) + netdev_info(netdev, "returning to hw mac address %pM\n", + hw->mac.addr); + else + netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; ret = i40e_aq_mac_address_write(&vsi->back->hw, @@ -1410,25 +1428,34 @@ static int i40e_set_mac(struct net_device *netdev, void *p) } } - f = i40e_find_mac(vsi, addr->sa_data, false, true); - if (!f) { - /* In order to be sure to not drop any packets, add the - * new address first then delete the old one. 
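The i40e_set_mac() rework that begins here replaces the old add-then-delete filter dance with explicit cases. As a rough map of the new decision flow, here is a hedged user-space sketch; set_mac_plan() and its strings are inventions for illustration, and ether_addr_equal() is re-implemented locally rather than taken from the kernel:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, 6) == 0;
}

/* Which filter operations the new flow performs for a MAC change. */
static const char *set_mac_plan(const unsigned char *dev_addr,
				const unsigned char *hw_addr,
				const unsigned char *want)
{
	if (ether_addr_equal(dev_addr, want))
		return "no-op: already using this address";
	if (ether_addr_equal(dev_addr, hw_addr))
		return "leaving hw MAC: AQ-remove default macvlan, add LAA filter";
	if (ether_addr_equal(want, hw_addr))
		return "returning to hw MAC: drop LAA filter, AQ-add default macvlan";
	return "LAA to LAA: drop old filter, add new LAA filter";
}

int main(void)
{
	unsigned char hw[6]   = { 0, 1, 2, 3, 4, 5 };
	unsigned char dev[6]  = { 0, 1, 2, 3, 4, 5 };
	unsigned char want[6] = { 0, 1, 2, 3, 4, 9 };

	puts(set_mac_plan(dev, hw, want));
	return 0;
}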
- */ - f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, - false, false); - if (!f) - return -ENOMEM; + if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) { + struct i40e_aqc_remove_macvlan_element_data element; - i40e_sync_vsi_filters(vsi); + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, netdev->dev_addr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + } else { i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false); - i40e_sync_vsi_filters(vsi); } - f->is_laa = true; - if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) - ether_addr_copy(netdev->dev_addr, addr->sa_data); + if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { + struct i40e_aqc_add_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, hw->mac.addr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); + } else { + f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, + false, false); + if (f) + f->is_laa = true; + } + + i40e_sync_vsi_filters(vsi); + ether_addr_copy(netdev->dev_addr, addr->sa_data); return 0; } @@ -1796,9 +1823,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) kfree(add_list); add_list = NULL; - if (add_happened && (!aq_ret)) { - /* do nothing */; - } else if (add_happened && (aq_ret)) { + if (add_happened && aq_ret && + pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) { dev_info(&pf->pdev->dev, "add filter failed, err %d, aq_err %d\n", aq_ret, pf->hw.aq.asq_last_status); @@ -4480,11 +4506,26 @@ static int i40e_up_complete(struct i40e_vsi *vsi) netif_carrier_on(vsi->netdev); } else if (vsi->netdev) { i40e_print_link_message(vsi, false); + /* need to check for a qualified module here */ + if ((pf->hw.phy.link_info.link_info & + I40E_AQ_MEDIA_AVAILABLE) && + (!(pf->hw.phy.link_info.an_info & + I40E_AQ_QUALIFIED_MODULE))) + netdev_err(vsi->netdev, + "the driver failed to link because an unqualified module was detected.\n"); } /* replay FDIR SB filters */ - if (vsi->type == I40E_VSI_FDIR) + if (vsi->type == I40E_VSI_FDIR) { + /* reset fd counters */ + pf->fd_add_err = pf->fd_atr_cnt = 0; + if (pf->fd_tcp_rule > 0) { + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); + pf->fd_tcp_rule = 0; + } i40e_fdir_filter_restore(vsi); + } i40e_service_event_schedule(pf); return 0; @@ -5125,6 +5166,7 @@ int i40e_get_current_fd_count(struct i40e_pf *pf) I40E_PFQF_FDSTAT_BEST_CNT_SHIFT); return fcnt_prog; } + /** * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled * @pf: board private structure **/ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) { u32 fcnt_prog, fcnt_avail; + if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) + return; + /* Check if FD SB or ATR was auto-disabled and if there is enough room * to re-enable */ - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->flags & I40E_FLAG_FD_SB_ENABLED)) - return; fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); fcnt_avail = pf->fdir_pf_filter_count; - if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) { + if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) || + (pf->fd_add_err == 0) || + (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { pf->auto_disable_flags &= 
~I40E_FLAG_FD_SB_ENABLED; @@ -5158,23 +5202,84 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) } } +#define I40E_MIN_FD_FLUSH_INTERVAL 10 +/** + * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB + * @pf: board private structure + **/ +static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) +{ + int flush_wait_retry = 50; + int reg; + + if (time_after(jiffies, pf->fd_flush_timestamp + + (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) { + set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + pf->fd_flush_timestamp = jiffies; + pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; + pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; + /* flush all filters */ + wr32(&pf->hw, I40E_PFQF_CTL_1, + I40E_PFQF_CTL_1_CLEARFDTABLE_MASK); + i40e_flush(&pf->hw); + pf->fd_flush_cnt++; + pf->fd_add_err = 0; + do { + /* Check FD flush status every 5-6msec */ + usleep_range(5000, 6000); + reg = rd32(&pf->hw, I40E_PFQF_CTL_1); + if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK)) + break; + } while (flush_wait_retry--); + if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) { + dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); + } else { + /* replay sideband filters */ + i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); + + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; + pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); + dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); + } + } +} + +/** + * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed + * @pf: board private structure + **/ +int i40e_get_current_atr_cnt(struct i40e_pf *pf) +{ + return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; +} + +/* We can see up to 256 filter programming descriptors in transit if the filters are + * being applied really fast, before we see the first + * filter miss error on Rx queue 0. Accumulating enough error messages before + * reacting will make sure we don't cause a flush too often. 
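Taken together with the reinit subtask below, the flush added above is deliberately hard to trigger: at most once per I40E_MIN_FD_FLUSH_INTERVAL seconds, and only after the error counter and ATR counts agree the table is dominated by stale ATR entries. A small standalone sketch of that gate (the counter names are hypothetical stand-ins for the pf fields):

#include <stdbool.h>
#include <stdio.h>

#define MAX_FD_PROGRAM_ERROR	256

/* Flush-and-replay is considered only after enough programming errors
 * accumulate and the current ATR count exceeds the sideband budget.
 */
static bool should_flush(unsigned int fd_add_err,
			 unsigned int cur_atr_cnt,
			 unsigned int atr_cnt_at_last_err,
			 unsigned int sb_filter_budget)
{
	return fd_add_err >= MAX_FD_PROGRAM_ERROR &&
	       cur_atr_cnt >= atr_cnt_at_last_err &&
	       cur_atr_cnt > sb_filter_budget;
}

int main(void)
{
	printf("%d\n", should_flush(256, 900, 800, 512));	/* 1 */
	printf("%d\n", should_flush(10, 900, 800, 512));	/* 0 */
	return 0;
}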
+ */ +#define I40E_MAX_FD_PROGRAM_ERROR 256 + /** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure **/ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) { - if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT)) - return; /* if interface is down do nothing */ if (test_bit(__I40E_DOWN, &pf->state)) return; + + if ((pf->fd_add_err >= I40E_MAX_FD_PROGRAM_ERROR) && + (i40e_get_current_atr_cnt(pf) >= pf->fd_atr_cnt) && + (i40e_get_current_atr_cnt(pf) > pf->fdir_pf_filter_count)) + i40e_fdir_flush_and_replay(pf); + i40e_fdir_check_and_reenable(pf); - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && - (pf->flags & I40E_FLAG_FD_SB_ENABLED)) - pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT; } /** @@ -5184,7 +5289,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) **/ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) { - if (!vsi) + if (!vsi || test_bit(__I40E_DOWN, &vsi->state)) return; switch (vsi->type) { @@ -5420,6 +5525,13 @@ static void i40e_handle_link_event(struct i40e_pf *pf, memcpy(&pf->hw.phy.link_info_old, hw_link_info, sizeof(pf->hw.phy.link_info_old)); + /* check for unqualified module, if link is down */ + if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && + (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && + (!(status->link_info & I40E_AQ_LINK_UP))) + dev_err(&pf->pdev->dev, + "The driver failed to link because an unqualified module was detected.\n"); + /* update link status */ hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type; hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed; @@ -5456,6 +5568,10 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) u32 oldval; u32 val; + /* Do not run clean AQ when PF reset fails */ + if (test_bit(__I40E_RESET_FAILED, &pf->state)) + return; + /* check for error indications */ val = rd32(&pf->hw, pf->hw.aq.arq.len); oldval = val; @@ -5861,19 +5977,20 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) ret = i40e_pf_reset(hw); if (ret) { dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); - goto end_core_reset; + set_bit(__I40E_RESET_FAILED, &pf->state); + goto clear_recovery; } pf->pfr_count++; if (test_bit(__I40E_DOWN, &pf->state)) - goto end_core_reset; + goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ ret = i40e_init_adminq(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); - goto end_core_reset; + goto clear_recovery; } /* re-verify the eeprom if we just had an EMP reset */ @@ -5991,6 +6108,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) i40e_send_version(pf); end_core_reset: + clear_bit(__I40E_RESET_FAILED, &pf->state); +clear_recovery: clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); } @@ -6036,9 +6155,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) I40E_GL_MDET_TX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; - dev_info(&pf->pdev->dev, - "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", - event, queue, pf_num, vf_num); + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n", + event, queue, pf_num, vf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } @@ -6050,9 +6169,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) 
I40E_GL_MDET_RX_EVENT_SHIFT; u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; - dev_info(&pf->pdev->dev, - "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", - event, queue, func); + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", + event, queue, func); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } @@ -6061,17 +6180,13 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); - dev_info(&pf->pdev->dev, - "MDD TX event is for this function 0x%08x, requesting PF reset.\n", - reg); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); pf_mdd_detected = true; } reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); - dev_info(&pf->pdev->dev, - "MDD RX event is for this function 0x%08x, requesting PF reset.\n", - reg); + dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); pf_mdd_detected = true; } /* Queue belongs to the PF, initiate a reset */ @@ -6088,14 +6203,16 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) if (reg & I40E_VP_MDET_TX_VALID_MASK) { wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i); + dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", + i); } reg = rd32(hw, I40E_VP_MDET_RX(i)); if (reg & I40E_VP_MDET_RX_VALID_MASK) { wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); vf->num_mdd_events++; - dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i); + dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", + i); } if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) { @@ -7086,6 +7203,11 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) } pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; + /* reset fd counters */ + pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; + pf->fdir_pf_active_filters = 0; + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); /* if ATR was auto disabled it can be re-enabled. */ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) @@ -7352,7 +7474,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_set_vf_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, - .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofck, + .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, #ifdef CONFIG_I40E_VXLAN .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, @@ -7421,14 +7543,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); ether_addr_copy(mac_addr, hw->mac.perm_addr); - /* The following two steps are necessary to prevent reception - * of tagged packets - by default the NVM loads a MAC-VLAN - * filter that will accept any tagged packet. This is to - * prevent that during normal operations until a specific - * VLAN tag filter has been set. + /* The following steps are necessary to prevent reception + * of tagged packets - some older NVM configurations load a + * default MAC-VLAN filter that accepts any tagged packet + * which must be replaced by a normal filter. 
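The reworked comment above describes replacing the NVM's promiscuous default MAC-VLAN filter, and the code that follows only installs the normal filter when the removal succeeded. A minimal sketch of that replace-only-if-removed flow (the enum and helpers here are stand-ins for the AQ calls, not the real API):

#include <stdio.h>

enum aq_ret { AQ_OK = 0, AQ_ENOENT = -2 };

/* Pretend firmware accepted removal of the default promiscuous filter. */
static enum aq_ret aq_remove_default_filter(void)
{
	return AQ_OK;
}

/* Install the normal perfect-match filter only if the default one
 * was actually there to remove, as the patched i40e_config_netdev does.
 */
static void config_default_mac(void)
{
	if (aq_remove_default_filter() == AQ_OK)
		puts("default filter gone: adding normal MAC filter");
	else
		puts("no default filter to replace: leaving filters alone");
}

int main(void)
{
	config_default_mac();
	return 0;
}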
*/ - i40e_rm_default_mac_filter(vsi, mac_addr); - i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); + if (!i40e_rm_default_mac_filter(vsi, mac_addr)) + i40e_add_filter(vsi, mac_addr, + I40E_VLAN_ANY, false, true); } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", @@ -7644,7 +7766,22 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) f_count++; if (f->is_laa && vsi->type == I40E_VSI_MAIN) { - i40e_aq_mac_address_write(&vsi->back->hw, + struct i40e_aqc_remove_macvlan_element_data element; + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, f->macaddr); + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + ret = i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + if (ret) { + /* some older FW has a different default */ + element.flags |= + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(hw, vsi->seid, + &element, 1, NULL); + } + + i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, f->macaddr, NULL); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 949a9a01778b..0988b5c1fe87 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -52,10 +52,8 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); /* debug function for adminq */ -void i40e_debug_aq(struct i40e_hw *hw, - enum i40e_debug_mask mask, - void *desc, - void *buffer); +void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); void i40e_idle_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 369848e107f8..be039dd6114d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -224,15 +224,19 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); if (ret) { dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); err = true; } else { - dev_info(&pf->pdev->dev, - "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + if (add) + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); } - return err ? -EOPNOTSUPP : 0; } @@ -276,10 +280,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, tcp->source = fd_data->src_port; if (add) { + pf->fd_tcp_rule++; if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; } + } else { + pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ? 
+ (pf->fd_tcp_rule - 1) : 0; + if (pf->fd_tcp_rule == 0) { + pf->flags |= I40E_FLAG_FD_ATR_ENABLED; + dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); + } } fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; @@ -287,12 +299,17 @@ if (ret) { dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); err = true; } else { - dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + if (add) + dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); } return err ? -EOPNOTSUPP : 0; @@ -355,13 +372,18 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, if (ret) { dev_info(&pf->pdev->dev, - "Filter command send failed for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n", + fd_data->pctype, fd_data->fd_id, ret); err = true; } else { - dev_info(&pf->pdev->dev, - "Filter OK for PCTYPE %d (ret = %d)\n", - fd_data->pctype, ret); + if (add) + dev_info(&pf->pdev->dev, + "Filter OK for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); + else + dev_info(&pf->pdev->dev, + "Filter deleted for PCTYPE %d loc = %d\n", + fd_data->pctype, fd_data->fd_id); } } @@ -443,8 +465,14 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) { - dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", - rx_desc->wb.qword0.hi_dword.fd_id); + if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) || + (I40E_DEBUG_FD & pf->hw.debug_mask)) + dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", + rx_desc->wb.qword0.hi_dword.fd_id); + + pf->fd_add_err++; + /* store the current atr filter count */ + pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); /* filter programming failed most likely due to table full */ fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf); @@ -454,29 +482,21 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, * FD ATR/SB and then re-enable it when there is room. 
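The two counters added to i40e_fd_handle_status() above feed the flush heuristic: each table-full programming error bumps fd_add_err and snapshots the ATR count, so the reinit subtask can later tell whether ATR churn, not sideband rules, filled the table. A user-space model of the bookkeeping (struct pf_model is hypothetical):

#include <stdio.h>

struct pf_model {
	unsigned int fd_add_err;
	unsigned int fd_atr_cnt;	/* ATR count at last error */
};

/* Record one filter-programming failure, as the patched handler does. */
static void on_fd_program_error(struct pf_model *pf, unsigned int cur_atr)
{
	pf->fd_add_err++;
	pf->fd_atr_cnt = cur_atr;
}

int main(void)
{
	struct pf_model pf = { 0, 0 };

	on_fd_program_error(&pf, 700);
	printf("err=%u atr_snapshot=%u\n", pf.fd_add_err, pf.fd_atr_cnt);
	return 0;
}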
*/ if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { - /* Turn off ATR first */ - if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && + if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && !(pf->auto_disable_flags & - I40E_FLAG_FD_ATR_ENABLED)) { - dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n"); - pf->auto_disable_flags |= - I40E_FLAG_FD_ATR_ENABLED; - pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; - } else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && - !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); pf->auto_disable_flags |= I40E_FLAG_FD_SB_ENABLED; - pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; } } else { - dev_info(&pdev->dev, "FD filter programming error\n"); + dev_info(&pdev->dev, + "FD filter programming failed due to incorrect filter parameters\n"); } } else if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { if (I40E_DEBUG_FD & pf->hw.debug_mask) - dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n", + dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", rx_desc->wb.qword0.hi_dword.fd_id); } } @@ -587,6 +607,7 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) { u32 tx_pending = i40e_get_tx_pending(tx_ring); + struct i40e_pf *pf = tx_ring->vsi->back; bool ret = false; clear_check_for_tx_hang(tx_ring); @@ -603,10 +624,17 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * pending but without time to complete it yet. */ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - tx_pending) { + (tx_pending >= I40E_MIN_DESC_PENDING)) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); + } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && + (tx_pending < I40E_MIN_DESC_PENDING) && + (tx_pending > 0)) { + if (I40E_DEBUG_FLOW & pf->hw.debug_mask) + dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", + tx_pending, tx_ring->queue_index); + pf->tx_sluggish_count++; } else { /* update completed stats and disarm the hang check */ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; @@ -1213,7 +1241,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); - skb->encapsulation = ipv4_tunnel || ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; /* Rx csum enabled and ip headers found? 
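The i40e_check_tx_hang() change earlier in this hunk splits "only a few descriptors pending" off from the hang path and counts it as sluggishness instead of arming a reset. A standalone sketch of the three-way classification (the names and values are illustrative):

#include <stdio.h>

#define MIN_DESC_PENDING 4

enum tx_state { TX_OK, TX_SLUGGISH, TX_HANG_ARMED };

/* Fewer than MIN_DESC_PENDING stuck descriptors is treated as the HW
 * waiting to flush a cacheline, not as a hang.
 */
static enum tx_state classify(unsigned long done_old, unsigned long packets,
			      unsigned int tx_pending)
{
	if (done_old == packets && tx_pending >= MIN_DESC_PENDING)
		return TX_HANG_ARMED;	/* second hit in a row triggers reset */
	if (done_old == packets && tx_pending > 0)
		return TX_SLUGGISH;	/* count it, but don't arm the check */
	return TX_OK;			/* progress made: disarm */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(10, 10, 8),	/* armed */
	       classify(10, 10, 2),	/* sluggish */
	       classify(9, 10, 2));	/* ok */
	return 0;
}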
*/ @@ -1287,6 +1314,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, } skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = ipv4_tunnel || ipv6_tunnel; return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 73f4fa425697..d7a625a6a14f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -121,6 +121,7 @@ enum i40e_dyn_idx_t { /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define I40E_MIN_DESC_PENDING 4 #define I40E_TX_FLAGS_CSUM (u32)(1) #define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 3ac6a0d2f143..4eeed267e4b7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -73,7 +73,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id, { struct i40e_pf *pf = vf->pf; - return qid < pf->vsi[vsi_id]->num_queue_pairs; + return qid < pf->vsi[vsi_id]->alloc_queue_pairs; } /** @@ -350,6 +350,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx, rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.prefena = 1; + rx_ctx.l2tsel = 1; /* clear the context in the HMC */ ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id); @@ -468,7 +469,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg); /* map PF queues to VF queues */ - for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) { + for (j = 0; j < pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; j++) { u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j); reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK); wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg); @@ -477,7 +478,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) /* map PF queues to VSI */ for (j = 0; j < 7; j++) { - if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) { + if (j * 2 >= pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs) { reg = 0x07FF07FF; /* unused */ } else { u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, @@ -584,7 +585,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); if (ret) goto error_alloc; - total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs; + total_queue_pairs += pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); /* store the total qps number for the runtime @@ -706,35 +707,6 @@ complete_reset: wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); i40e_flush(hw); } - -/** - * i40e_vfs_are_assigned - * @pf: pointer to the pf structure - * - * Determine if any VFs are assigned to VMs - **/ -static bool i40e_vfs_are_assigned(struct i40e_pf *pf) -{ - struct pci_dev *pdev = pf->pdev; - struct pci_dev *vfdev; - - /* loop through all the VFs to see if we own any that are assigned */ - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF , NULL); - while (vfdev) { - /* if we don't own it we don't care */ - if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) { - /* if it is assigned we cannot release it */ - if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) - return true; - } - - vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, - I40E_DEV_ID_VF, - vfdev); - } - - return false; -} #ifdef CONFIG_PCI_IOV /** @@ 
-842,7 +814,7 @@ void i40e_free_vfs(struct i40e_pf *pf) * assigned. Setting the number of VFs to 0 through sysfs is caught * before this function ever gets called. */ - if (!i40e_vfs_are_assigned(pf)) { + if (!pci_vfs_assigned(pf->pdev)) { pci_disable_sriov(pf->pdev); /* Acknowledge VFLR for all VFS. Without this, VFs will fail to * work correctly when SR-IOV gets re-enabled. @@ -979,7 +951,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) if (num_vfs) return i40e_pci_sriov_enable(pdev, num_vfs); - if (!i40e_vfs_are_assigned(pf)) { + if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); @@ -1123,7 +1095,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf) vfres->vsi_res[i].vsi_id = vf->lan_vsi_index; vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; vfres->vsi_res[i].num_queue_pairs = - pf->vsi[vf->lan_vsi_index]->num_queue_pairs; + pf->vsi[vf->lan_vsi_index]->alloc_queue_pairs; memcpy(vfres->vsi_res[i].default_mac_addr, vf->default_lan_addr.addr, ETH_ALEN); i++; @@ -1209,6 +1181,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_virtchnl_vsi_queue_config_info *qci = (struct i40e_virtchnl_vsi_queue_config_info *)msg; struct i40e_virtchnl_queue_pair_info *qpi; + struct i40e_pf *pf = vf->pf; u16 vsi_id, vsi_queue_id; i40e_status aq_ret = 0; int i; @@ -1242,6 +1215,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } } + /* set vsi num_queue_pairs in use to num configured by vf */ + pf->vsi[vf->lan_vsi_index]->num_queue_pairs = qci->num_queue_pairs; error_param: /* send the response to the vf */ @@ -2094,7 +2069,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* Force the VF driver stop so it has to reload with new MAC address */ i40e_vc_disable_vf(pf, vf); dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); - ret = 0; error_param: return ret; @@ -2419,7 +2393,7 @@ error_out: * * Enable or disable VF spoof checking **/ -int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable) +int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 63e7e0d81ad2..0adc61e1052d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -122,7 +122,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); -int i40e_ndo_set_vf_spoofck(struct net_device *netdev, int vf_id, bool enable); +int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 003006033614..f206be917842 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -788,7 +788,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, /* bump the tail */ i40e_debug(hw, 
I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, + buff, buff_size); (hw->aq.asq.next_to_use)++; if (hw->aq.asq.next_to_use == hw->aq.asq.count) hw->aq.asq.next_to_use = 0; @@ -842,7 +843,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer writeback:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, + buff_size); /* update the error if time out occurred */ if ((!cmd_completed) && @@ -938,7 +940,8 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, hw->aq.nvm_busy = false; i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); - i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf); + i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, + hw->aq.arq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 4ea90bf239bb..952560551964 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -75,13 +75,15 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) * @mask: debug mask * @desc: pointer to admin queue descriptor * @buffer: pointer to command buffer + * @buf_len: max length of buffer * * Dumps debug log about adminq command with descriptor contents. **/ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, - void *buffer) + void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; + u16 len = le16_to_cpu(aq_desc->datalen); u8 *aq_buffer = (u8 *)buffer; u32 data[4]; u32 i = 0; @@ -105,7 +107,9 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, if ((buffer != NULL) && (aq_desc->datalen != 0)) { memset(data, 0, sizeof(data)); i40e_debug(hw, mask, "AQ CMD Buffer:\n"); - for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) { + if (buf_len < len) + len = buf_len; + for (i = 0; i < len; i++) { data[((i % 16) / 4)] |= ((u32)aq_buffer[i]) << (8 * (i % 4)); if ((i % 16) == 15) { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 849edcc2e398..9173834825ac 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -53,10 +53,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw, bool i40evf_asq_done(struct i40e_hw *hw); /* debug function for adminq */ -void i40evf_debug_aq(struct i40e_hw *hw, - enum i40e_debug_mask mask, - void *desc, - void *buffer); +void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); void i40e_idle_aq(struct i40e_hw *hw); void i40evf_resume_aq(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 95a3ec236b49..04c7c1557a0c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -163,11 +163,13 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * pending but without time to complete it yet. 
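The hang check here (like its PF twin earlier) only reports a hang when the suspicious condition holds for two consecutive passes, via test_and_set_bit on the ring state. A user-space stand-in for that two-strike latch (the bool flag models the ring state bit):

#include <stdbool.h>
#include <stdio.h>

static bool armed;

/* First suspicious pass only arms the latch; a second consecutive
 * suspicious pass reports the hang. Any clean pass disarms it.
 */
static bool hang_check(bool suspicious)
{
	if (!suspicious) {
		armed = false;	/* progress: disarm */
		return false;
	}
	if (armed)
		return true;	/* second strike: report the hang */
	armed = true;
	return false;
}

int main(void)
{
	int a = hang_check(true);
	int b = hang_check(true);
	int c = hang_check(false);

	printf("%d %d %d\n", a, b, c);	/* 0 1 0 */
	return 0;
}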
*/ if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - tx_pending) { + (tx_pending >= I40E_MIN_DESC_PENDING)) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } else { + } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || + !(tx_pending < I40E_MIN_DESC_PENDING) || + !(tx_pending > 0)) { /* update completed stats and disarm the hang check */ tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); @@ -744,7 +746,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); - skb->encapsulation = ipv4_tunnel || ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; /* Rx csum enabled and ip headers found? */ @@ -818,6 +819,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, } skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = ipv4_tunnel || ipv6_tunnel; return; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 8bc6858163b0..f6dcf9dd9290 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -121,6 +121,7 @@ enum i40e_dyn_idx_t { /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#define I40E_MIN_DESC_PENDING 4 #define I40E_TX_FLAGS_CSUM (u32)(1) #define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 38429fae4fcf..c51bc7a33bc5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = "Intel(R) XL710/X710 Virtual Function Network Driver"; -#define DRV_VERSION "0.9.40" +#define DRV_VERSION "1.0.5" const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation."; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 236a6183a865..051ea94bdcd3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -2548,11 +2548,13 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) /** * igb_set_eee_i350 - Enable/disable EEE support * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement * * Enable/disable EEE based on setting in dev_spec structure.
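
Dropping skb->encapsulation in favor of skb->csum_level in i40e_rx_checksum() above follows the then-new stack convention: with CHECKSUM_UNNECESSARY, csum_level tells the stack how many nested (tunnel) checksums the hardware verified beyond the outermost one. A hedged sketch of the receive-side pattern, with an illustrative helper name:

    #include <linux/skbuff.h>

    /* illustrative helper, not the driver's API */
    static void example_mark_rx_csum(struct sk_buff *skb, bool is_tunnel)
    {
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = is_tunnel ? 1 : 0; /* verified levels past the outer header */
    }
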
* **/ -s32 igb_set_eee_i350(struct e1000_hw *hw) +s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) { u32 ipcnfg, eeer; @@ -2566,7 +2568,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw) if (!(hw->dev_spec._82575.eee_disable)) { u32 eee_su = rd32(E1000_EEE_SU); - ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | E1000_EEER_LPI_FC); @@ -2593,11 +2604,13 @@ out: /** * igb_set_eee_i354 - Enable/disable EEE support * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement * * Enable/disable EEE legacy mode based on setting in dev_spec structure. * **/ -s32 igb_set_eee_i354(struct e1000_hw *hw) +s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = 0; @@ -2636,8 +2649,16 @@ s32 igb_set_eee_i354(struct e1000_hw *hw) if (ret_val) goto out; - phy_data |= E1000_EEE_ADV_100_SUPPORTED | - E1000_EEE_ADV_1000_SUPPORTED; + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, E1000_EEE_ADV_DEV_I354, phy_data); diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index b407c55738fa..2154aea7aa7e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -263,8 +263,8 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); u16 igb_rxpbs_adjust_82580(u32 data); s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data); -s32 igb_set_eee_i350(struct e1000_hw *); -s32 igb_set_eee_i354(struct e1000_hw *); +s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M); +s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M); s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status); #define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index c737d1f40838..02cfd3b14762 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2675,6 +2675,7 @@ static int igb_set_eee(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct ethtool_eee eee_curr; + bool adv1g_eee = true, adv100m_eee = true; s32 ret_val; if ((hw->mac.type < e1000_i350) || @@ -2701,12 +2702,14 @@ static int igb_set_eee(struct net_device *netdev, return -EINVAL; } - if (edata->advertised & - ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { + if (!edata->advertised || (edata->advertised & + ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { dev_err(&adapter->pdev->dev, - "EEE Advertisement supports only 100Tx and or 100T full duplex\n"); + "EEE Advertisement supports only 100Tx and/or 100T full duplex\n"); return -EINVAL; } + adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); + adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); } else if (!edata->eee_enabled) { dev_err(&adapter->pdev->dev, @@
-2718,10 +2721,6 @@ static int igb_set_eee(struct net_device *netdev, if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { hw->dev_spec._82575.eee_disable = !edata->eee_enabled; adapter->flags |= IGB_FLAG_EEE; - if (hw->mac.type == e1000_i350) - igb_set_eee_i350(hw); - else - igb_set_eee_i354(hw); /* reset link */ if (netif_running(netdev)) @@ -2730,6 +2729,17 @@ static int igb_set_eee(struct net_device *netdev, igb_reset(adapter); } + if (hw->mac.type == e1000_i354) + ret_val = igb_set_eee_i354(hw, adv1g_eee, adv100m_eee); + else + ret_val = igb_set_eee_i350(hw, adv1g_eee, adv100m_eee); + + if (ret_val) { + dev_err(&adapter->pdev->dev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + return 0; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index cb14bbdfb056..6cf0c17ad9c4 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2012,10 +2012,10 @@ void igb_reset(struct igb_adapter *adapter) case e1000_i350: case e1000_i210: case e1000_i211: - igb_set_eee_i350(hw); + igb_set_eee_i350(hw, true, true); break; case e1000_i354: - igb_set_eee_i354(hw); + igb_set_eee_i354(hw, true, true); break; default: break; @@ -2619,7 +2619,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case e1000_i210: case e1000_i211: /* Enable EEE for internal copper PHY devices */ - err = igb_set_eee_i350(hw); + err = igb_set_eee_i350(hw, true, true); if ((!err) && (!hw->dev_spec._82575.eee_disable)) { adapter->eee_advert = @@ -2630,7 +2630,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case e1000_i354: if ((rd32(E1000_CTRL_EXT) & E1000_CTRL_EXT_LINK_MODE_SGMII)) { - err = igb_set_eee_i354(hw); + err = igb_set_eee_i354(hw, true, true); if ((!err) && (!hw->dev_spec._82575.eee_disable)) { adapter->eee_advert = @@ -4813,6 +4813,41 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring, tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } +static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + struct net_device *netdev = tx_ring->netdev; + + netif_stop_subqueue(netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (igb_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_wake_subqueue(netdev, tx_ring->queue_index); + + u64_stats_update_begin(&tx_ring->tx_syncp2); + tx_ring->tx_stats.restart_queue2++; + u64_stats_update_end(&tx_ring->tx_syncp2); + + return 0; +} + +static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) +{ + if (igb_desc_unused(tx_ring) >= size) + return 0; + return __igb_maybe_stop_tx(tx_ring, size); +} + static void igb_tx_map(struct igb_ring *tx_ring, struct igb_tx_buffer *first, const u8 hdr_len) @@ -4915,13 +4950,17 @@ static void igb_tx_map(struct igb_ring *tx_ring, tx_ring->next_to_use = i; - writel(i, tx_ring->tail); + /* Make sure there is space in the ring for the next send. 
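
In the igb_set_eee() rework above, adv100m_eee and adv1g_eee are derived from the user's ethtool advertisement mask and passed down so each speed's EEE bit is set or cleared independently instead of being force-enabled together. The underlying idiom is a conditional mask update:

    /* sketch: advertise a capability bit only when requested */
    static inline u32 update_adv_mask(u32 mask, u32 bit, bool enable)
    {
        return enable ? (mask | bit) : (mask & ~bit);
    }
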
*/ + igb_maybe_stop_tx(tx_ring, DESC_NEEDED); - /* we need this if more than one processor can write to our tail - * at a time, it synchronizes IO on IA64/Altix systems - */ - mmiowb(); + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + writel(i, tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); + } return; dma_error: @@ -4941,41 +4980,6 @@ dma_error: tx_ring->next_to_use = i; } -static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) -{ - struct net_device *netdev = tx_ring->netdev; - - netif_stop_subqueue(netdev, tx_ring->queue_index); - - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. - */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. - */ - if (igb_desc_unused(tx_ring) < size) - return -EBUSY; - - /* A reprieve! */ - netif_wake_subqueue(netdev, tx_ring->queue_index); - - u64_stats_update_begin(&tx_ring->tx_syncp2); - tx_ring->tx_stats.restart_queue2++; - u64_stats_update_end(&tx_ring->tx_syncp2); - - return 0; -} - -static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) -{ - if (igb_desc_unused(tx_ring) >= size) - return 0; - return __igb_maybe_stop_tx(tx_ring, size); -} - netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, struct igb_ring *tx_ring) { @@ -5046,9 +5050,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, igb_tx_map(tx_ring, first, hdr_len); - /* Make sure there is space in the ring for the next send. */ - igb_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; out_drop: @@ -6768,113 +6769,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring, } /** - * igb_get_headlen - determine size of header for LRO/GRO - * @data: pointer to the start of the headers - * @max_len: total length of section to find headers in - * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, and GRO offloads. The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. 
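
The igb_tx_map() change above implements skb->xmit_more batching: the descriptor-space check still runs on every packet, but the tail-register doorbell and its mmiowb() are skipped while the stack signals that more frames are queued behind this one, so several descriptors can be flushed with a single MMIO write. The ordering matters - the ring-full check must come first so a queue that just stopped still flushes its pending work. Compressed, using the helper names from the hunk:

    tx_ring->next_to_use = i;
    igb_maybe_stop_tx(tx_ring, DESC_NEEDED);    /* may stop the subqueue */

    /* ring the doorbell only when batching ends or the queue stopped */
    if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
        writel(i, tx_ring->tail);
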
- **/ -static unsigned int igb_get_headlen(unsigned char *data, - unsigned int max_len) -{ - union { - unsigned char *network; - /* l2 headers */ - struct ethhdr *eth; - struct vlan_hdr *vlan; - /* l3 headers */ - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - } hdr; - __be16 protocol; - u8 nexthdr = 0; /* default to not TCP */ - u8 hlen; - - /* this should never happen, but better safe than sorry */ - if (max_len < ETH_HLEN) - return max_len; - - /* initialize network frame pointer */ - hdr.network = data; - - /* set first protocol and move network header forward */ - protocol = hdr.eth->h_proto; - hdr.network += ETH_HLEN; - - /* handle any vlan tag if present */ - if (protocol == htons(ETH_P_8021Q)) { - if ((hdr.network - data) > (max_len - VLAN_HLEN)) - return max_len; - - protocol = hdr.vlan->h_vlan_encapsulated_proto; - hdr.network += VLAN_HLEN; - } - - /* handle L3 protocols */ - if (protocol == htons(ETH_P_IP)) { - if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) - return max_len; - - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return hdr.network - data; - - /* record next protocol if header is present */ - if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) - nexthdr = hdr.ipv4->protocol; - } else if (protocol == htons(ETH_P_IPV6)) { - if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) - return max_len; - - /* record next protocol */ - nexthdr = hdr.ipv6->nexthdr; - hlen = sizeof(struct ipv6hdr); - } else { - return hdr.network - data; - } - - /* relocate pointer to start of L4 header */ - hdr.network += hlen; - - /* finally sort out TCP */ - if (nexthdr == IPPROTO_TCP) { - if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) - return max_len; - - /* access doff as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[12] & 0xF0) >> 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct tcphdr)) - return hdr.network - data; - - hdr.network += hlen; - } else if (nexthdr == IPPROTO_UDP) { - if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) - return max_len; - - hdr.network += sizeof(struct udphdr); - } - - /* If everything has gone correctly hdr.network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. - */ - if ((hdr.network - data) < max_len) - return hdr.network - data; - else - return max_len; -} - -/** * igb_pull_tail - igb specific version of skb_pull_tail * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -6918,7 +6812,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring, /* we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad. 
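
The hand-rolled header parser removed above (ixgbe carries an almost identical copy, dropped later in this section) is superseded by the shared eth_get_headlen() helper, which walks the L2/L3/L4 headers via the flow dissector and returns how many bytes are worth pulling into the skb's linear area; the one-line swap lands just below. Typical call site, as used by these drivers:

    /* size the linear pull by the protocol headers actually present */
    unsigned int pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);

    skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
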
*/ - pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); + pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); /* align pull length to size of long to optimize memcpy performance */ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ac9f2148cdc5..673d82095779 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -386,119 +386,87 @@ struct ixgbe_q_vector { char name[IFNAMSIZ + 9]; #ifdef CONFIG_NET_RX_BUSY_POLL - unsigned int state; -#define IXGBE_QV_STATE_IDLE 0 -#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ -#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */ -#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */ -#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL) -#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED) -#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ -#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ -#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) -#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) - spinlock_t lock; + atomic_t state; #endif /* CONFIG_NET_RX_BUSY_POLL */ /* for dynamic allocation of rings associated with this q_vector */ struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; }; + #ifdef CONFIG_NET_RX_BUSY_POLL +enum ixgbe_qv_state_t { + IXGBE_QV_STATE_IDLE = 0, + IXGBE_QV_STATE_NAPI, + IXGBE_QV_STATE_POLL, + IXGBE_QV_STATE_DISABLE +}; + static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) { - - spin_lock_init(&q_vector->lock); - q_vector->state = IXGBE_QV_STATE_IDLE; + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); } /* called from the device poll routine to get ownership of a q_vector */ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) { - int rc = true; - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBE_QV_LOCKED) { - WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI); - q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD; - rc = false; + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_NAPI); #ifdef BP_EXTENDED_STATS + if (rc != IXGBE_QV_STATE_IDLE) q_vector->tx.ring->stats.yields++; #endif - } else { - /* we don't care if someone yielded */ - q_vector->state = IXGBE_QV_STATE_NAPI; - } - spin_unlock_bh(&q_vector->lock); - return rc; + + return rc == IXGBE_QV_STATE_IDLE; } /* returns true is someone tried to get the qv while napi had it */ -static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) { - int rc = false; - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL | - IXGBE_QV_STATE_NAPI_YIELD)); - - if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) - rc = true; - /* will reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBE_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); + + /* flush any outstanding Rx frames */ + if (q_vector->napi.gro_list) + napi_gro_flush(&q_vector->napi, false); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); } /* called from ixgbe_low_latency_poll() */ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) { - int rc = true; - spin_lock_bh(&q_vector->lock); - if 
((q_vector->state & IXGBE_QV_LOCKED)) { - q_vector->state |= IXGBE_QV_STATE_POLL_YIELD; - rc = false; + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_POLL); #ifdef BP_EXTENDED_STATS - q_vector->rx.ring->stats.yields++; + if (rc != IXGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; #endif - } else { - /* preserve yield marks */ - q_vector->state |= IXGBE_QV_STATE_POLL; - } - spin_unlock_bh(&q_vector->lock); - return rc; + return rc == IXGBE_QV_STATE_IDLE; } /* returns true if someone tried to get the qv while it was locked */ -static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) { - int rc = false; - spin_lock_bh(&q_vector->lock); - WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI)); - - if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) - rc = true; - /* will reset state to idle, unless QV is disabled */ - q_vector->state &= IXGBE_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - return rc; + WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL); + + /* reset state to idle */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); } /* true if a socket is polling, even if it did not get the lock */ static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) { - WARN_ON(!(q_vector->state & IXGBE_QV_OWNED)); - return q_vector->state & IXGBE_QV_USER_PEND; + return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; } /* false if QV is currently owned */ static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) { - int rc = true; - spin_lock_bh(&q_vector->lock); - if (q_vector->state & IXGBE_QV_OWNED) - rc = false; - q_vector->state |= IXGBE_QV_STATE_DISABLED; - spin_unlock_bh(&q_vector->lock); - - return rc; + int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, + IXGBE_QV_STATE_DISABLE); + + return rc == IXGBE_QV_STATE_IDLE; } #else /* CONFIG_NET_RX_BUSY_POLL */ @@ -643,9 +611,7 @@ struct ixgbe_adapter { * thus the additional *_CAPABLE flags. 
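
The ixgbe busy-poll rework above collapses a spinlock plus a five-bit state bitmask into one atomic word: NAPI or a busy-polling socket claims ownership with a single cmpxchg from IDLE and releases it with a plain atomic store, so the yield/owned bookkeeping disappears. The locking pattern in isolation:

    #include <linux/atomic.h>

    enum { ST_IDLE = 0, ST_NAPI, ST_POLL, ST_DISABLE };

    /* claim succeeds only for the IDLE -> owner transition */
    static bool example_qv_trylock(atomic_t *state, int owner)
    {
        return atomic_cmpxchg(state, ST_IDLE, owner) == ST_IDLE;
    }

    static void example_qv_unlock(atomic_t *state)
    {
        atomic_set(state, ST_IDLE);
    }
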
*/ u32 flags; -#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) #define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) #define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) #define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) #define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) @@ -760,8 +726,6 @@ struct ixgbe_adapter { u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 wol; - u16 bd_number; - u16 eeprom_verh; u16 eeprom_verl; u16 eeprom_cap; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e4100b5737b6..cff383b1cbb0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1303,7 +1303,7 @@ static const struct ixgbe_reg_test reg_test_82599[] = { { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF }, { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } + { .reg = 0 } }; /* default 82598 register test */ @@ -1331,7 +1331,7 @@ static const struct ixgbe_reg_test reg_test_82598[] = { { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, - { 0, 0, 0, 0 } + { .reg = 0 } }; static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 2d9451e39686..ce40c77381e9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -696,46 +696,83 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) ixgbe_set_rss_queues(adapter); } -static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, - int vectors) +/** + * ixgbe_acquire_msix_vectors - acquire MSI-X vectors + * @adapter: board private structure + * + * Attempts to acquire a suitable range of MSI-X vector interrupts. Will + * return a negative error code if unable to acquire MSI-X vectors for any + * reason. + */ +static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) { - int vector_threshold; + struct ixgbe_hw *hw = &adapter->hw; + int i, vectors, vector_threshold; + + /* We start by asking for one vector per queue pair */ + vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); - /* We'll want at least 2 (vector_threshold): - * 1) TxQ[0] + RxQ[0] handler - * 2) Other (Link Status Change, etc.) + /* It is easy to be greedy for MSI-X vectors. However, it really + * doesn't do much good if we have a lot more vectors than CPUs. We'll + * be somewhat conservative and only ask for (roughly) the same number + * of vectors as there are CPUs. */ - vector_threshold = MIN_MSIX_COUNT; + vectors = min_t(int, vectors, num_online_cpus()); - /* - * The more we get, the more we will assign to Tx/Rx Cleanup - * for the separate queues...where Rx Cleanup >= Tx Cleanup. - * Right now, we simply care about how many we'll get; we'll - * set them up later while requesting irq's. + /* Some vectors are necessary for non-queue interrupts */ + vectors += NON_Q_VECTORS; + + /* Hardware can only support a maximum of hw.mac->max_msix_vectors. + * With features such as RSS and VMDq, we can easily surpass the + * number of Rx and Tx descriptor queues supported by our device. 
+ * Thus, we cap the maximum in the rare cases where the CPU count also + * exceeds our vector limit */ + vectors = min_t(int, vectors, hw->mac.max_msix_vectors); + + /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] + * handler, and (2) an Other (Link Status Change, etc.) handler. + */ + vector_threshold = MIN_MSIX_COUNT; + + adapter->msix_entries = kcalloc(vectors, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->msix_entries) + return -ENOMEM; + + for (i = 0; i < vectors; i++) + adapter->msix_entries[i].entry = i; + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, vector_threshold, vectors); if (vectors < 0) { - /* Can't allocate enough MSI-X interrupts? Oh well. - * This just means we'll go with either a single MSI - * vector or fall back to legacy interrupts. + /* A negative count of allocated vectors indicates an error in + * acquiring within the specified range of MSI-X vectors */ - netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, - "Unable to allocate MSI-X interrupts\n"); + e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", + vectors); + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; kfree(adapter->msix_entries); adapter->msix_entries = NULL; - } else { - adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ - /* - * Adjust for only the vectors we'll use, which is minimum - * of max_msix_q_vectors + NON_Q_VECTORS, or the number of - * vectors we were allocated. - */ - vectors -= NON_Q_VECTORS; - adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); + + return vectors; } + + /* we successfully allocated some number of vectors within our + * requested range. + */ + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; + + /* Adjust for only the vectors we'll use, which is minimum + * of max_q_vectors, or the number of vectors we were allocated. + */ + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); + + return 0; } static void ixgbe_add_ring(struct ixgbe_ring *ring, @@ -807,6 +844,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ixgbe_poll, 64); napi_hash_add(&q_vector->napi); +#ifdef CONFIG_NET_RX_BUSY_POLL + /* initialize busy poll */ + atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); + +#endif /* tie q_vector and adapter together */ adapter->q_vector[v_idx] = q_vector; q_vector->adapter = adapter; @@ -1049,46 +1091,20 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) **/ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; - int vector, v_budget, err; + int err; - /* - * It's easy to be greedy for MSI-X vectors, but it really - * doesn't do us much good if we have a lot more vectors - * than CPU's. So let's be conservative and only ask for - * (roughly) the same number of vectors as there are CPU's. - * The default is to use pairs of vectors. - */ - v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); - v_budget = min_t(int, v_budget, num_online_cpus()); - v_budget += NON_Q_VECTORS; + /* We will try to get MSI-X interrupts first */ + if (!ixgbe_acquire_msix_vectors(adapter)) + return; - /* - * At the same time, hardware can only support a maximum of - * hw.mac->max_msix_vectors vectors. With features - * such as RSS and VMDq, we can easily surpass the number of Rx and Tx - * descriptor queues supported by our device. Thus, we cap it off in - * those rare cases where the cpu count also exceeds our vector limit. 
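
ixgbe_acquire_msix_vectors() now owns the entire budget computation - one vector per queue pair, capped at the online CPU count, plus NON_Q_VECTORS, capped again at the MAC's MSI-X limit - and hands the result to pci_enable_msix_range(), which allocates any count within the requested range or returns a negative errno. Condensed:

    /* sketch: ask for up to `want` vectors, tolerate as few as `min` */
    int got = pci_enable_msix_range(pdev, entries, min, want);

    if (got < 0) {
        kfree(entries);   /* nothing in range - fall back to MSI/legacy */
        return got;
    }
    /* on success: min <= got <= want vectors are now enabled */
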
+ /* At this point, we do not have MSI-X capabilities. We need to + * reconfigure or disable various features which require MSI-X + * capability. */ - v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); - - /* A failure in MSI-X entry allocation isn't fatal, but it does - * mean we disable MSI-X capabilities of the adapter. */ - adapter->msix_entries = kcalloc(v_budget, - sizeof(struct msix_entry), GFP_KERNEL); - if (adapter->msix_entries) { - for (vector = 0; vector < v_budget; vector++) - adapter->msix_entries[vector].entry = vector; - - ixgbe_acquire_msix_vectors(adapter, v_budget); - - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - return; - } - /* disable DCB if number of TCs exceeds 1 */ + /* Disable DCB unless we only have a single traffic class */ if (netdev_get_num_tc(adapter->netdev) > 1) { - e_err(probe, "num TCs exceeds number of queues - disabling DCB\n"); + e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n"); netdev_reset_tc(adapter->netdev); if (adapter->hw.mac.type == ixgbe_mac_82598EB) @@ -1098,26 +1114,30 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; } + adapter->dcb_cfg.num_tcs.pg_tcs = 1; adapter->dcb_cfg.num_tcs.pfc_tcs = 1; - /* disable SR-IOV */ + /* Disable SR-IOV support */ + e_dev_warn("Disabling SR-IOV support\n"); ixgbe_disable_sriov(adapter); - /* disable RSS */ + /* Disable RSS */ + e_dev_warn("Disabling RSS support\n"); adapter->ring_feature[RING_F_RSS].limit = 1; + /* recalculate number of queues now that many features have been + * changed or disabled. + */ ixgbe_set_num_queues(adapter); adapter->num_q_vectors = 1; err = pci_enable_msi(adapter->pdev); - if (err) { - netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, - "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n", - err); - return; - } - adapter->flags |= IXGBE_FLAG_MSI_ENABLED; + if (err) + e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n", + err); + else + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 87bd53fdd209..06ef5a32a893 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -440,7 +440,7 @@ static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { {IXGBE_TXDCTL(0), "TXDCTL"}, /* List Terminator */ - {} + { .name = NULL } }; @@ -1521,120 +1521,6 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) ixgbe_release_rx_desc(rx_ring, i); } -/** - * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE - * @data: pointer to the start of the headers - * @max_len: total length of section to find headers in - * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, GRO, and RSC offloads. The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. 
- **/ -static unsigned int ixgbe_get_headlen(unsigned char *data, - unsigned int max_len) -{ - union { - unsigned char *network; - /* l2 headers */ - struct ethhdr *eth; - struct vlan_hdr *vlan; - /* l3 headers */ - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - } hdr; - __be16 protocol; - u8 nexthdr = 0; /* default to not TCP */ - u8 hlen; - - /* this should never happen, but better safe than sorry */ - if (max_len < ETH_HLEN) - return max_len; - - /* initialize network frame pointer */ - hdr.network = data; - - /* set first protocol and move network header forward */ - protocol = hdr.eth->h_proto; - hdr.network += ETH_HLEN; - - /* handle any vlan tag if present */ - if (protocol == htons(ETH_P_8021Q)) { - if ((hdr.network - data) > (max_len - VLAN_HLEN)) - return max_len; - - protocol = hdr.vlan->h_vlan_encapsulated_proto; - hdr.network += VLAN_HLEN; - } - - /* handle L3 protocols */ - if (protocol == htons(ETH_P_IP)) { - if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) - return max_len; - - /* access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[0] & 0x0F) << 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return hdr.network - data; - - /* record next protocol if header is present */ - if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) - nexthdr = hdr.ipv4->protocol; - } else if (protocol == htons(ETH_P_IPV6)) { - if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) - return max_len; - - /* record next protocol */ - nexthdr = hdr.ipv6->nexthdr; - hlen = sizeof(struct ipv6hdr); -#ifdef IXGBE_FCOE - } else if (protocol == htons(ETH_P_FCOE)) { - if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) - return max_len; - hlen = FCOE_HEADER_LEN; -#endif - } else { - return hdr.network - data; - } - - /* relocate pointer to start of L4 header */ - hdr.network += hlen; - - /* finally sort out TCP/UDP */ - if (nexthdr == IPPROTO_TCP) { - if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) - return max_len; - - /* access doff as a u8 to avoid unaligned access on ia64 */ - hlen = (hdr.network[12] & 0xF0) >> 2; - - /* verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct tcphdr)) - return hdr.network - data; - - hdr.network += hlen; - } else if (nexthdr == IPPROTO_UDP) { - if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) - return max_len; - - hdr.network += sizeof(struct udphdr); - } - - /* - * If everything has gone correctly hdr.network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. - */ - if ((hdr.network - data) < max_len) - return hdr.network - data; - else - return max_len; -} - static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, struct sk_buff *skb) { @@ -1793,7 +1679,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, * we need the header to contain the greater of either ETH_HLEN or * 60 bytes if the skb->len is less than 60 for skb_pad. 
*/ - pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); + pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); @@ -2191,9 +2077,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; - if (cleaned_count) - ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); - return total_rx_packets; } @@ -5300,15 +5183,15 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) { struct device *dev = tx_ring->dev; int orig_node = dev_to_node(dev); - int numa_node = -1; + int ring_node = -1; int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; if (tx_ring->q_vector) - numa_node = tx_ring->q_vector->numa_node; + ring_node = tx_ring->q_vector->numa_node; - tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + tx_ring->tx_buffer_info = vzalloc_node(size, ring_node); if (!tx_ring->tx_buffer_info) tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) @@ -5320,7 +5203,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - set_dev_node(dev, numa_node); + set_dev_node(dev, ring_node); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, @@ -5384,15 +5267,15 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) { struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); - int numa_node = -1; + int ring_node = -1; int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; if (rx_ring->q_vector) - numa_node = rx_ring->q_vector->numa_node; + ring_node = rx_ring->q_vector->numa_node; - rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + rx_ring->rx_buffer_info = vzalloc_node(size, ring_node); if (!rx_ring->rx_buffer_info) rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) @@ -5404,7 +5287,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - set_dev_node(dev, numa_node); + set_dev_node(dev, ring_node); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, @@ -6319,25 +6202,55 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) ixgbe_ping_all_vfs(adapter); } +static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; + + if (tx_ring->next_to_use != tx_ring->next_to_clean) + return true; + } + + return false; +} + +static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + int i, j; + + if (!adapter->num_vfs) + return false; + + for (i = 0; i < adapter->num_vfs; i++) { + for (j = 0; j < q_per_pool; j++) { + u32 h, t; + + h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j)); + t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j)); + + if (h != t) + return true; + } + } + + return false; +} + /** * ixgbe_watchdog_flush_tx - flush queues on link down * @adapter: pointer to the device adapter structure **/ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) { - int i; - int some_tx_pending = 0; - if 
(!netif_carrier_ok(adapter->netdev)) { - for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; - if (tx_ring->next_to_use != tx_ring->next_to_clean) { - some_tx_pending = 1; - break; - } - } - - if (some_tx_pending) { + if (ixgbe_ring_tx_pending(adapter) || + ixgbe_vf_tx_pending(adapter)) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. @@ -6837,6 +6750,36 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } +static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(ixgbe_desc_unused(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! - use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + return 0; +} + +static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +{ + if (likely(ixgbe_desc_unused(tx_ring) >= size)) + return 0; + + return __ixgbe_maybe_stop_tx(tx_ring, size); +} + #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS) @@ -6958,8 +6901,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, tx_ring->next_to_use = i; - /* notify HW of packet */ - ixgbe_write_tail(tx_ring, i); + ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); + + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { + /* notify HW of packet */ + ixgbe_write_tail(tx_ring, i); + } return; dma_error: @@ -7067,32 +7014,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring, input, common, ring->queue_index); } -static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) -{ - netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); - /* Herbert's original patch had: - * smp_mb__after_netif_stop_queue(); - * but since that doesn't exist yet, just open code it. */ - smp_mb(); - - /* We need to check again in a case another CPU has just - * made room available. */ - if (likely(ixgbe_desc_unused(tx_ring) < size)) - return -EBUSY; - - /* A reprieve! 
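
__ixgbe_maybe_stop_tx() above (like its igb counterpart earlier) is the canonical lock-free queue-stop handshake: stop the subqueue, issue a full memory barrier so the Tx cleanup path observes the stopped state before the free-descriptor count is re-read, then re-open if cleanup freed space during that window. Skeleton, with illustrative helper names:

    netif_stop_subqueue(netdev, qid);
    smp_mb();                            /* pairs with the cleanup side */

    if (desc_unused(ring) < size)
        return -EBUSY;                   /* genuinely full */

    netif_start_subqueue(netdev, qid);   /* cleanup raced us: reopen */
    return 0;
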
- use start_queue because it doesn't call schedule */ - netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); - ++tx_ring->tx_stats.restart_queue; - return 0; -} - -static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) -{ - if (likely(ixgbe_desc_unused(tx_ring) >= size)) - return 0; - return __ixgbe_maybe_stop_tx(tx_ring, size); -} - static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { @@ -7187,9 +7108,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && - !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, - &adapter->state))) { + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + adapter->ptp_clock && + !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, + &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IXGBE_TX_FLAGS_TSTAMP; @@ -7261,8 +7183,6 @@ xmit_fcoe: #endif /* IXGBE_FCOE */ ixgbe_tx_map(tx_ring, first, hdr_len); - ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); - return NETDEV_TX_OK; out_drop: @@ -7735,39 +7655,13 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], const unsigned char *addr, u16 flags) { - struct ixgbe_adapter *adapter = netdev_priv(dev); - int err; - - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); - - /* Hardware does not support aging addresses so if a - * ndm_state is given only allow permanent addresses - */ - if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { - pr_info("%s: FDB only supports static addresses\n", - ixgbe_driver_name); - return -EINVAL; - } - + /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { - u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; - - if (netdev_uc_count(dev) < rar_uc_entries) - err = dev_uc_add_excl(dev, addr); - else - err = -ENOMEM; - } else if (is_multicast_ether_addr(addr)) { - err = dev_mc_add_excl(dev, addr); - } else { - err = -EINVAL; + if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev)) + return -ENOMEM; } - /* Only return duplicate errors if NLM_F_EXCL is set */ - if (err == -EEXIST && !(flags & NLM_F_EXCL)) - err = 0; - - return err; + return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); } static int ixgbe_ndo_bridge_setlink(struct net_device *dev, @@ -7830,9 +7724,17 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) { struct ixgbe_fwd_adapter *fwd_adapter = NULL; struct ixgbe_adapter *adapter = netdev_priv(pdev); + int used_pools = adapter->num_vfs + adapter->num_rx_pools; unsigned int limit; int pool, err; + + /* Hardware has a limited number of available pools. Each VF, and the + * PF require a pool. Check to ensure we don't attempt to use more + * than the available number of pools.
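
As the lines just below (and the ixgbe_sriov.c hunk further on) show, VFs, the PF, and L2-forwarding (macvlan offload) accelerators now all draw from the same hardware pool budget, so both admission paths compare against IXGBE_MAX_VF_FUNCTIONS:

    /* sketch: one shared budget for SR-IOV VFs and fwd-offload pools */
    int used_pools = adapter->num_vfs + adapter->num_rx_pools;

    if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
        return ERR_PTR(-EINVAL);   /* no pool left for another accel dev */
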
+ */ + if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) + return ERR_PTR(-EINVAL); + #ifdef CONFIG_RPS if (vdev->num_rx_queues != vdev->num_tx_queues) { netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", @@ -8080,7 +7982,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ixgbe_adapter *adapter = NULL; struct ixgbe_hw *hw; const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; - static int cards_found; int i, err, pci_using_dac, expected_gts; unsigned int indices = MAX_TX_QUEUES; u8 part_str[IXGBE_PBANUM_LENGTH]; @@ -8166,8 +8067,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->watchdog_timeo = 5 * HZ; strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); - adapter->bd_number = cards_found; - /* Setup hw api */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; @@ -8451,7 +8350,6 @@ skip_sriov: ixgbe_add_sanmac_netdev(netdev); e_dev_info("%s\n", ixgbe_default_device_descr); - cards_found++; #ifdef CONFIG_IXGBE_HWMON if (ixgbe_sysfs_init(adapter)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index c14d4d89672f..706fc69aa0c5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -250,13 +250,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) if (err) return err; - /* While the SR-IOV capability structure reports total VFs to be - * 64 we limit the actual number that can be allocated to 63 so - * that some transmit/receive resources can be reserved to the - * PF. The PCI bus driver already checks for other values out of - * range. + /* While the SR-IOV capability structure reports total VFs to be 64, + * we have to limit the actual number allocated based on two factors. + * First, we reserve some transmit/receive resources for the PF. + * Second, VMDQ also uses the same pools that SR-IOV does. We need to + * account for this, so that we don't accidentally allocate more VFs + * than we have available pools. The PCI bus driver already checks for + * other values out of range. */ - if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) + if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VF_FUNCTIONS) return -EPERM; adapter->num_vfs = num_vfs; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index e6b07c2a01fe..dfd55d83bc03 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2194,6 +2194,8 @@ enum { #define IXGBE_VFLRE(_i) ((((_i) & 1) ? 
0x001C0 : 0x00600)) #define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) /* Translated register #defines */ +#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) +#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) @@ -2202,6 +2204,11 @@ enum { #define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDHN(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) +#define IXGBE_PVFTDTN(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) + enum ixgbe_fdir_pballoc_type { IXGBE_FDIR_PBALLOC_NONE = 0, IXGBE_FDIR_PBALLOC_64K = 1, diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index d420f124633f..cc0e5b7ff041 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -523,7 +523,7 @@ static const struct ixgbevf_reg_test reg_test_vf[] = { { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF }, { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 }, - { 0, 0, 0, 0 } + { .reg = 0 } }; static const u32 register_test_patterns[] = { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index a0a1de9ce238..ba96cb5b886d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -385,7 +385,6 @@ struct ixgbevf_adapter { /* structs defined in ixgbe_vf.h */ struct ixgbe_hw hw; u16 msg_enable; - u16 bd_number; /* Interrupt Throttle Rate */ u32 eitr_param; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c22a00c3621a..030a219c85e3 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3464,7 +3464,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct ixgbevf_adapter *adapter = NULL; struct ixgbe_hw *hw = NULL; const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; - static int cards_found; int err, pci_using_dac; err = pci_enable_device(pdev); @@ -3525,8 +3524,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ixgbevf_assign_netdev_ops(netdev); - adapter->bd_number = cards_found; - /* Setup hw api */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; @@ -3601,7 +3598,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw_dbg(hw, "MAC: %d\n", hw->mac.type); hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); - cards_found++; return 0; err_register: diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 4d44d64ae387..9cddd56d02c3 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -434,6 +434,21 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, if (!(links_reg & IXGBE_LINKS_UP)) goto out; + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + udelay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; 
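
The ixgbevf link-check change being added here (the loop closes just below) gives SFP+ and direct-attach links on 82599 VFs up to 500us to report a stable status: five reads of VFLINKS spaced 100us apart, bailing out as soon as the up bit drops. The bounded-poll idiom on its own:

    #include <linux/delay.h>

    /* sketch: treat the link as up only if it stays up while settling */
    static bool example_link_stable(struct ixgbe_hw *hw)
    {
        int i;

        for (i = 0; i < 5; i++) {
            udelay(100);
            if (!(IXGBE_READ_REG(hw, IXGBE_VFLINKS) & IXGBE_LINKS_UP))
                return false;   /* link bounced during the settle window */
        }
        return true;
    }
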
+ } + } + switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: *speed = IXGBE_LINK_SPEED_10GB_FULL; diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 24b242277ea1..264eab7d3b26 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -1107,7 +1107,7 @@ static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) { u16 v = 0; if (__xm_phy_read(hw, port, reg, &v)) - pr_warning("%s: phy read timed out\n", hw->dev[port]->name); + pr_warn("%s: phy read timed out\n", hw->dev[port]->name); return v; } @@ -1903,7 +1903,7 @@ static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) return 0; } - pr_warning("%s: phy write timeout\n", hw->dev[port]->name); + pr_warn("%s: phy write timeout\n", hw->dev[port]->name); return -EIO; } @@ -1931,7 +1931,7 @@ static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) { u16 v = 0; if (__gm_phy_read(hw, port, reg, &v)) - pr_warning("%s: phy read timeout\n", hw->dev[port]->name); + pr_warn("%s: phy read timeout\n", hw->dev[port]->name); return v; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index dba48a5ce7ab..bd3366267039 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2814,7 +2814,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) default: if (net_ratelimit()) - pr_warning("unknown status opcode 0x%x\n", opcode); + pr_warn("unknown status opcode 0x%x\n", opcode); } } while (hw->st_idx != idx); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index abddcf8c40aa..f3032fec8fce 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2459,6 +2459,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } priv->rx_ring_num = prof->rx_ring_num; priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 
1 : 0; + priv->cqe_size = mdev->dev->caps.cqe_size; priv->mac_index = -1; priv->msg_enable = MLX4_EN_MSG_LEVEL; spin_lock_init(&priv->stats_lock); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9c909d23f14c..a33048ee9621 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -588,6 +588,8 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, skb_copy_to_linear_data(skb, va, length); skb->tail += length; } else { + unsigned int pull_len; + /* Move relevant fragments to skb */ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags, skb, length); @@ -597,16 +599,17 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv, } skb_shinfo(skb)->nr_frags = used_frags; + pull_len = eth_get_headlen(va, SMALL_PACKET_SIZE); /* Copy headers into the skb linear buffer */ - memcpy(skb->data, va, HEADER_COPY_SIZE); - skb->tail += HEADER_COPY_SIZE; + memcpy(skb->data, va, pull_len); + skb->tail += pull_len; /* Skip headers in first fragment */ - skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE; + skb_shinfo(skb)->frags[0].page_offset += pull_len; /* Adjust size of first fragment */ - skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE); - skb->data_len = length - HEADER_COPY_SIZE; + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], pull_len); + skb->data_len = length - pull_len; } return skb; } @@ -668,7 +671,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * descriptor offset can be deduced from the CQE index instead of * reading 'cqe->index' */ index = cq->mcq.cons_index & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; /* Process all completed CQEs */ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, @@ -769,7 +772,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud gro_skb->ip_summed = CHECKSUM_UNNECESSARY; if (l2_tunnel) - gro_skb->encapsulation = 1; + gro_skb->csum_level = 1; if ((cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) && (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { @@ -823,8 +826,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud skb->protocol = eth_type_trans(skb, dev); skb_record_rx_queue(skb, cq->ring); - if (l2_tunnel) - skb->encapsulation = 1; + if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY) + skb->csum_level = 1; if (dev->features & NETIF_F_RXHASH) skb_set_hash(skb, @@ -855,7 +858,7 @@ next: ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & ring->size_mask; - cqe = &cq->buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; if (++polled == budget) goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index dae3da6d8dd0..c44f4237b9be 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -319,7 +319,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, } } } - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); return tx_info->nr_txbb; } @@ -382,7 +382,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, return true; index = cons_index & size_mask; - cqe = &buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; ring_index = ring->cons & size_mask; stamp_index = ring_index; @@ -430,7 +430,7 @@ static bool 
mlx4_en_process_tx_cq(struct net_device *dev, ++cons_index; index = cons_index & size_mask; - cqe = &buf[(index << factor) + factor]; + cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 2a004b347e1d..a49c9d11d8a5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -101,21 +101,24 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not) mb(); } -static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor) +static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, + u8 eqe_size) { /* (entry & (eq->nent - 1)) gives us a cyclic array */ - unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor); - /* CX3 is capable of extending the EQE from 32 to 64 bytes. - * When this feature is enabled, the first (in the lower addresses) + unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; + /* CX3 is capable of extending the EQE from 32 to 64 bytes with + * strides of 64B,128B and 256B. + * When 64B EQE is used, the first (in the lower addresses) * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes * contain the legacy EQE information. + * In all other cases, the first 32B contains the legacy EQE info. */ return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; } -static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor) +static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size) { - struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor); + struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; } @@ -459,8 +462,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) enum slave_port_gen_event gen_event; unsigned long flags; struct mlx4_vport_state *s_info; + int eqe_size = dev->caps.eqe_size; - while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) { + while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) { /* * Make sure we read EQ entry contents after we've * checked the ownership bit. @@ -894,8 +898,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent, eq->dev = dev; eq->nent = roundup_pow_of_two(max(nent, 2)); - /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */ - npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B. 
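
Once the stride is configurable, every CQE/EQE lookup in the mlx4 hunks above keys off dev->caps.cqe_size / eqe_size instead of the old fixed MLX4_EQ_ENTRY_SIZE << eqe_factor shift; the cyclic indexing itself is unchanged. Roughly:

    /* sketch: entry N in a cyclic queue of power-of-two depth `nent` */
    unsigned long offset = (entry & (nent - 1)) * eqe_size;
    void *eqe = page_list[offset / PAGE_SIZE].buf + offset % PAGE_SIZE;

(The real get_eqe() additionally skips a 32-byte reserved prefix when the legacy 64B-EQE factor is in effect, as its comment explains.)
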
+ */ + npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; eq->page_list = kmalloc(npages * sizeof *eq->page_list, GFP_KERNEL); @@ -997,8 +1003,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox; int err; int i; - /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */ - int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE; + /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with + * strides of 64B,128B and 256B + */ + int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 494753e44ae3..13b2e4a51ef4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -137,7 +137,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [8] = "Dynamic QP updates support", [9] = "Device managed flow steering IPoIB support", [10] = "TCP/IP offloads/flow-steering for VXLAN support", - [11] = "MAD DEMUX (Secure-Host) support" + [11] = "MAD DEMUX (Secure-Host) support", + [12] = "Large cache line (>64B) CQE stride support", + [13] = "Large cache line (>64B) EQE stride support" }; int i; @@ -557,6 +559,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET 0x74 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 +#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE 0x7a #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 @@ -733,6 +736,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->max_rq_sg = field; MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET); dev_cap->max_rq_desc_sz = size; + MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE); + if (field & (1 << 6)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + if (field & (1 << 7)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); @@ -1376,6 +1384,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38) +#define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b) #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) @@ -1452,11 +1461,25 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) { *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30); dev->caps.cqe_size = 64; - dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } + /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */ + if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) && + (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) { + dev->caps.eqe_size = cache_line_size(); + dev->caps.cqe_size = cache_line_size(); + dev->caps.eqe_factor = 0;
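/* Editor's note (added): both nibbles of the INIT_HCA stride byte
 * written by the MLX4_PUT() just below carry the same exponent,
 * ilog2(size) - 5. With a 128-byte cache line, ilog2(128) - 5 = 2, so
 * the byte is 0x22; a 256-byte cache line gives 0x33.
 */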
+ MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 | + (ilog2(dev->caps.eqe_size) - 5)), + INIT_HCA_EQE_CQE_STRIDE_OFFSET); + + /* Userspace still needs to know that CQE > 32B is supported */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + /* QPC/EEC/CQC/EQC/RDMARC attributes */ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); @@ -1616,6 +1639,17 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, if (byte_field & 0x40) /* 64-bytes cqe enabled */ param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */ + MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET); + if (byte_field) { + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED; + param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED; + param->cqe_size = 1 << ((byte_field & + MLX4_CQE_SIZE_MASK_STRIDE) + 5); + param->eqe_size = 1 << (((byte_field & + MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5); + } + /* TPT attributes */ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 1fce03ebe5c4..9b835aecac96 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -178,6 +178,8 @@ struct mlx4_init_hca_param { u8 uar_page_sz; /* log pg sz in 4k chunks */ u8 steering_mode; /* for QUERY_HCA */ u64 dev_cap_enabled; + u16 cqe_size; /* For use only when CQE stride feature enabled */ + u16 eqe_size; /* For use only when EQE stride feature enabled */ }; struct mlx4_init_ib_param { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7e2d5d57c598..1f10023af1db 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -104,7 +104,8 @@ module_param(enable_64b_cqe_eqe, bool, 0444); MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); -#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE +#define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ + MLX4_FUNC_CAP_EQE_CQE_STRIDE) static char mlx4_version[] = DRV_NAME ": Mellanox ConnectX core driver v" @@ -196,6 +197,40 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev) dev->caps.port_mask[i] = dev->caps.port_type[i]; } +static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) +{ + struct mlx4_caps *dev_cap = &dev->caps; + + /* The FW does not support it, or the user cancelled it */ + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) || + !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) + return; + + /* Must have 64B CQE/EQE enabled by the FW to use the bigger stride. + * When the FW has NCSI it may decide not to report 64B CQE/EQEs. + */ + if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) || + !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) { + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + return; + } + + if (cache_line_size() == 128 || cache_line_size() == 256) { + mlx4_dbg(dev, "Enabling CQE stride, cache line size is supported\n"); + /* Set the real data size inside the CQE back to 32B */ + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; + dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; + + if (mlx4_is_master(dev)) + dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; + } else { + mlx4_dbg(dev, "Disabling CQE stride, cache line size is unsupported\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } +} +
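/* Editor's note (added): the mlx4_QUERY_HCA() hunk above inverts that
 * encoding. Reading back the 128-byte example byte 0x22:
 * cqe_size = 1 << ((0x22 & MLX4_CQE_SIZE_MASK_STRIDE) + 5) = 1 << 7 = 128,
 * eqe_size = 1 << (((0x22 & MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5) = 128.
 */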
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { int err; @@ -390,6 +425,14 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE; dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE; } + + if (dev_cap->flags2 & + (MLX4_DEV_CAP_FLAG2_CQE_STRIDE | + MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) { + mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n"); + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; + dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; + } } if ((dev->caps.flags & @@ -397,6 +440,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) mlx4_is_master(dev)) dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE; + if (!mlx4_is_slave(dev)) + mlx4_enable_cqe_eqe_stride(dev); + return 0; } @@ -724,11 +770,22 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) { dev->caps.cqe_size = 64; - dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE; + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; } else { dev->caps.cqe_size = 32; } + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) { + dev->caps.eqe_size = hca_param.eqe_size; + dev->caps.eqe_factor = 0; + } + + if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) { + dev->caps.cqe_size = hca_param.cqe_size; + /* Userspace still needs to know when CQE > 32B */ + dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; + } + dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_warn(dev, "Timestamping is not supported in slave mode\n"); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index b508c7887ef8..de10dbb2e6ed 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -285,6 +285,9 @@ struct mlx4_icm_table { #define MLX4_MPT_STATUS_SW 0xF0 #define MLX4_MPT_STATUS_HW 0x00 +#define MLX4_CQE_SIZE_MASK_STRIDE 0x3 +#define MLX4_EQE_SIZE_MASK_STRIDE 0x30 + /* * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 3de41be49425..e3d71c386da7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -542,6 +542,7 @@ struct mlx4_en_priv { unsigned max_mtu; int base_qpn; int cqe_factor; + int cqe_size; struct mlx4_en_rss_map rss_map; __be32 ctrl_flags; @@ -612,6 +613,11 @@ struct mlx4_mac_entry { struct rcu_head rcu; }; +static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) +{ + return buf + idx * cqe_sz; +} + #ifdef CONFIG_NET_RX_BUSY_POLL static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) { diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 4f40d7b8629e..cc0485e3c621 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3537,7 +3537,7 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev) vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); - strncpy(buf, dev->name, IFNAMSIZ); + strlcpy(buf, dev->name, IFNAMSIZ); flush_work(&vdev->reset_task); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 925b296d8ab8..f39cae620f61 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1481,7 +1481,7 @@ static int phy_init(struct net_device *dev) } /* phy vendor specific configuration */ - if ((np->phy_oui == PHY_OUI_CICADA)) { + if (np->phy_oui == PHY_OUI_CICADA) { if (init_cicada(dev, np, phyinterface)) { netdev_info(dev, "%s: phy init failed\n", pci_name(np->pci_dev)); diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 2d6b148528dd..fa2db41e02f8 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -693,11 +693,11 @@ static void yellowfin_tx_timeout(struct net_device *dev) /* Note: these should be KERN_DEBUG. 
*/ if (yellowfin_debug) { int i; - pr_warning(" Rx ring %p: ", yp->rx_ring); + pr_warn(" Rx ring %p: ", yp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) pr_cont(" %08x", yp->rx_ring[i].result_status); pr_cont("\n"); - pr_warning(" Tx ring %p: ", yp->tx_ring); + pr_warn(" Tx ring %p: ", yp->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) pr_cont(" %04x /%08x", yp->tx_status[i].tx_errs, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 1159031f885b..32456c79cc73 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1477,9 +1477,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u32 val; if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { - pr_warning("%s: chip revisions between 0x%x-0x%x " - "will not be enabled.\n", - module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); + pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n", + module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1); return -ENODEV; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index b84f5ea3d659..e56c1bb36141 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -39,8 +39,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 61 -#define QLCNIC_LINUX_VERSIONID "5.3.61" +#define _QLCNIC_LINUX_SUBVERSION 62 +#define QLCNIC_LINUX_VERSIONID "5.3.62" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -540,6 +540,8 @@ struct qlcnic_hardware_context { u8 lb_mode; u16 vxlan_port; struct device *hwmon_dev; + u32 post_mode; + bool run_post; }; struct qlcnic_adapter_stats { @@ -2283,6 +2285,7 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops; #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 #define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 +#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830 #define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430 #define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040 #define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440 @@ -2307,6 +2310,7 @@ static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) bool status; status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) || + (device == PCI_DEVICE_ID_QLOGIC_QLE8830) || (device == PCI_DEVICE_ID_QLOGIC_QLE844X) || (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) || (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? 
true : false; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 476e4998ef99..840bf36b5e9d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -35,6 +35,35 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *); #define QLC_SKIP_INACTIVE_PCI_REGS 7 #define QLC_MAX_LEGACY_FUNC_SUPP 8 +/* 83xx Module type */ +#define QLC_83XX_MODULE_FIBRE_10GBASE_LRM 0x1 /* 10GBase-LRM */ +#define QLC_83XX_MODULE_FIBRE_10GBASE_LR 0x2 /* 10GBase-LR */ +#define QLC_83XX_MODULE_FIBRE_10GBASE_SR 0x3 /* 10GBase-SR */ +#define QLC_83XX_MODULE_DA_10GE_PASSIVE_CP 0x4 /* 10GE passive + * copper(compliant) + */ +#define QLC_83XX_MODULE_DA_10GE_ACTIVE_CP 0x5 /* 10GE active limiting + * copper(compliant) + */ +#define QLC_83XX_MODULE_DA_10GE_LEGACY_CP 0x6 /* 10GE passive copper + * (legacy, best effort) + */ +#define QLC_83XX_MODULE_FIBRE_1000BASE_SX 0x7 /* 1000Base-SX */ +#define QLC_83XX_MODULE_FIBRE_1000BASE_LX 0x8 /* 1000Base-LX */ +#define QLC_83XX_MODULE_FIBRE_1000BASE_CX 0x9 /* 1000Base-CX */ +#define QLC_83XX_MODULE_TP_1000BASE_T 0xa /* 1000Base-T*/ +#define QLC_83XX_MODULE_DA_1GE_PASSIVE_CP 0xb /* 1GE passive copper + * (legacy, best effort) + */ +#define QLC_83XX_MODULE_UNKNOWN 0xf /* Unknown module type */ + +/* Port types */ +#define QLC_83XX_10_CAPABLE BIT_8 +#define QLC_83XX_100_CAPABLE BIT_9 +#define QLC_83XX_1G_CAPABLE BIT_10 +#define QLC_83XX_10G_CAPABLE BIT_11 +#define QLC_83XX_AUTONEG_ENABLE BIT_15 + static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, {QLCNIC_CMD_CONFIG_INTRPT, 18, 34}, @@ -667,6 +696,7 @@ void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf, int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) { + struct qlcnic_hardware_context *ahw = adapter->ahw; int status; status = qlcnic_83xx_get_port_config(adapter); @@ -674,13 +704,20 @@ int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter) dev_err(&adapter->pdev->dev, "Get Port Info failed\n"); } else { - if (QLC_83XX_SFP_10G_CAPABLE(adapter->ahw->port_config)) - adapter->ahw->port_type = QLCNIC_XGBE; - else - adapter->ahw->port_type = QLCNIC_GBE; - if (QLC_83XX_AUTONEG(adapter->ahw->port_config)) - adapter->ahw->link_autoneg = AUTONEG_ENABLE; + if (ahw->port_config & QLC_83XX_10G_CAPABLE) { + ahw->port_type = QLCNIC_XGBE; + } else if (ahw->port_config & QLC_83XX_10_CAPABLE || + ahw->port_config & QLC_83XX_100_CAPABLE || + ahw->port_config & QLC_83XX_1G_CAPABLE) { + ahw->port_type = QLCNIC_GBE; + } else { + ahw->port_type = QLCNIC_XGBE; + } + + if (QLC_83XX_AUTONEG(ahw->port_config)) + ahw->link_autoneg = AUTONEG_ENABLE; + } return status; } @@ -2664,7 +2701,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter) QLC_83XX_FLASH_STATUS_READY) break; - msleep(QLC_83XX_FLASH_STATUS_REG_POLL_DELAY); + usleep_range(1000, 1100); } while (--retries); if (!retries) @@ -3176,22 +3213,33 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) break; } config = cmd.rsp.arg[3]; - if (QLC_83XX_SFP_PRESENT(config)) { - switch (ahw->module_type) { - case LINKEVENT_MODULE_OPTICAL_UNKNOWN: - case LINKEVENT_MODULE_OPTICAL_SRLR: - case LINKEVENT_MODULE_OPTICAL_LRM: - case LINKEVENT_MODULE_OPTICAL_SFP_1G: - ahw->supported_type = PORT_FIBRE; - break; - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: - case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: - case LINKEVENT_MODULE_TWINAX: - 
ahw->supported_type = PORT_TP; - break; - default: - ahw->supported_type = PORT_OTHER; - } + switch (QLC_83XX_SFP_MODULE_TYPE(config)) { + case QLC_83XX_MODULE_FIBRE_10GBASE_LRM: + case QLC_83XX_MODULE_FIBRE_10GBASE_LR: + case QLC_83XX_MODULE_FIBRE_10GBASE_SR: + ahw->supported_type = PORT_FIBRE; + ahw->port_type = QLCNIC_XGBE; + break; + case QLC_83XX_MODULE_FIBRE_1000BASE_SX: + case QLC_83XX_MODULE_FIBRE_1000BASE_LX: + case QLC_83XX_MODULE_FIBRE_1000BASE_CX: + ahw->supported_type = PORT_FIBRE; + ahw->port_type = QLCNIC_GBE; + break; + case QLC_83XX_MODULE_TP_1000BASE_T: + ahw->supported_type = PORT_TP; + ahw->port_type = QLCNIC_GBE; + break; + case QLC_83XX_MODULE_DA_10GE_PASSIVE_CP: + case QLC_83XX_MODULE_DA_10GE_ACTIVE_CP: + case QLC_83XX_MODULE_DA_10GE_LEGACY_CP: + case QLC_83XX_MODULE_DA_1GE_PASSIVE_CP: + ahw->supported_type = PORT_DA; + ahw->port_type = QLCNIC_XGBE; + break; + default: + ahw->supported_type = PORT_OTHER; + ahw->port_type = QLCNIC_XGBE; } if (config & 1) err = 1; @@ -3204,9 +3252,9 @@ out: int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, struct ethtool_cmd *ecmd) { + struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = 0; int status = 0; - struct qlcnic_hardware_context *ahw = adapter->ahw; if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) { /* Get port configuration info */ @@ -3229,20 +3277,41 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, ecmd->autoneg = AUTONEG_DISABLE; } - if (ahw->port_type == QLCNIC_XGBE) { - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->advertising = ADVERTISED_10000baseT_Full; + ecmd->supported = (SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | + SUPPORTED_Autoneg); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + if (ahw->port_config & QLC_83XX_10_CAPABLE) + ecmd->advertising |= SUPPORTED_10baseT_Full; + if (ahw->port_config & QLC_83XX_100_CAPABLE) + ecmd->advertising |= SUPPORTED_100baseT_Full; + if (ahw->port_config & QLC_83XX_1G_CAPABLE) + ecmd->advertising |= SUPPORTED_1000baseT_Full; + if (ahw->port_config & QLC_83XX_10G_CAPABLE) + ecmd->advertising |= SUPPORTED_10000baseT_Full; + if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE) + ecmd->advertising |= ADVERTISED_Autoneg; } else { - ecmd->supported = (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); - ecmd->advertising = (ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); + switch (ahw->link_speed) { + case SPEED_10: + ecmd->advertising = SUPPORTED_10baseT_Full; + break; + case SPEED_100: + ecmd->advertising = SUPPORTED_100baseT_Full; + break; + case SPEED_1000: + ecmd->advertising = SUPPORTED_1000baseT_Full; + break; + case SPEED_10000: + ecmd->advertising = SUPPORTED_10000baseT_Full; + break; + default: + break; + } + } switch (ahw->supported_type) { @@ -3258,6 +3327,12 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, ecmd->port = PORT_TP; ecmd->transceiver = XCVR_INTERNAL; break; + case PORT_DA: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_DA; + ecmd->transceiver = XCVR_EXTERNAL; + break; default: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; @@ -3272,35 +3347,60 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter, struct 
ethtool_cmd *ecmd) { - int status = 0; + struct qlcnic_hardware_context *ahw = adapter->ahw; u32 config = adapter->ahw->port_config; + int status = 0; - if (ecmd->autoneg) - adapter->ahw->port_config |= BIT_15; - - switch (ethtool_cmd_speed(ecmd)) { - case SPEED_10: - adapter->ahw->port_config |= BIT_8; - break; - case SPEED_100: - adapter->ahw->port_config |= BIT_9; - break; - case SPEED_1000: - adapter->ahw->port_config |= BIT_10; - break; - case SPEED_10000: - adapter->ahw->port_config |= BIT_11; - break; - default: - return -EINVAL; + /* 83xx devices do not support Half duplex */ + if (ecmd->duplex == DUPLEX_HALF) { + netdev_info(adapter->netdev, + "Half duplex mode not supported\n"); + return -EINVAL; } + if (ecmd->autoneg) { + ahw->port_config |= QLC_83XX_AUTONEG_ENABLE; + ahw->port_config |= (QLC_83XX_100_CAPABLE | + QLC_83XX_1G_CAPABLE | + QLC_83XX_10G_CAPABLE); + } else { /* force speed */ + ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE; + switch (ethtool_cmd_speed(ecmd)) { + case SPEED_10: + ahw->port_config &= ~(QLC_83XX_100_CAPABLE | + QLC_83XX_1G_CAPABLE | + QLC_83XX_10G_CAPABLE); + ahw->port_config |= QLC_83XX_10_CAPABLE; + break; + case SPEED_100: + ahw->port_config &= ~(QLC_83XX_10_CAPABLE | + QLC_83XX_1G_CAPABLE | + QLC_83XX_10G_CAPABLE); + ahw->port_config |= QLC_83XX_100_CAPABLE; + break; + case SPEED_1000: + ahw->port_config &= ~(QLC_83XX_10_CAPABLE | + QLC_83XX_100_CAPABLE | + QLC_83XX_10G_CAPABLE); + ahw->port_config |= QLC_83XX_1G_CAPABLE; + break; + case SPEED_10000: + ahw->port_config &= ~(QLC_83XX_10_CAPABLE | + QLC_83XX_100_CAPABLE | + QLC_83XX_1G_CAPABLE); + ahw->port_config |= QLC_83XX_10G_CAPABLE; + break; + default: + return -EINVAL; + } + } status = qlcnic_83xx_set_port_config(adapter); if (status) { - dev_info(&adapter->pdev->dev, - "Failed to Set Link Speed and autoneg.\n"); - adapter->ahw->port_config = config; + netdev_info(adapter->netdev, + "Failed to set link speed and autoneg\n"); + ahw->port_config = config; } + return status; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 2bf101a47d02..f3346a3779d3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -83,6 +83,7 @@ /* Firmware image definitions */ #define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000 #define QLC_83XX_FW_FILE_NAME "83xx_fw.bin" +#define QLC_83XX_POST_FW_FILE_NAME "83xx_post_fw.bin" #define QLC_84XX_FW_FILE_NAME "84xx_fw.bin" #define QLC_83XX_BOOT_FROM_FLASH 0 #define QLC_83XX_BOOT_FROM_FILE 0x12345678 @@ -360,7 +361,6 @@ enum qlcnic_83xx_states { #define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F) #define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16)) #define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10) -#define QLC_83XX_SFP_10G_CAPABLE(data) ((data) & BIT_11) #define QLC_83XX_LINK_STATS(data) ((data) & BIT_0) #define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7) #define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 86783e1afcf7..9a2cfe4efac6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2075,6 +2075,121 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev) dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__); } +/* POST FW related definitions */ +#define QLC_83XX_POST_SIGNATURE_REG 0x41602014
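/* Editor's note (added): the POST flow defined below boots a dedicated
 * self-test firmware (83xx_post_fw.bin), writes one of the three mode
 * values to QLC_83XX_POST_MODE_REG, and polls QLC_83XX_POST_SIGNATURE_REG
 * until it reads QLC_83XX_POST_PASS or the mode-specific timeout (padded
 * with two extra seconds in qlcnic_83xx_run_post()) expires. Worked
 * example: loading the driver with load_fw_file=3 selects
 * QLC_83XX_POST_MEDIUM_MODE, for a poll budget of 2930 + 2000 ms.
 */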
+#define QLC_83XX_POST_MODE_REG 0x41602018 +#define QLC_83XX_POST_FAST_MODE 0 +#define QLC_83XX_POST_MEDIUM_MODE 1 +#define QLC_83XX_POST_SLOW_MODE 2 + +/* POST Timeout values in milliseconds */ +#define QLC_83XX_POST_FAST_MODE_TIMEOUT 690 +#define QLC_83XX_POST_MED_MODE_TIMEOUT 2930 +#define QLC_83XX_POST_SLOW_MODE_TIMEOUT 7500 + +/* POST result values */ +#define QLC_83XX_POST_PASS 0xfffffff0 +#define QLC_83XX_POST_ASIC_STRESS_TEST_FAIL 0xffffffff +#define QLC_83XX_POST_DDR_TEST_FAIL 0xfffffffe +#define QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL 0xfffffffc +#define QLC_83XX_POST_FLASH_TEST_FAIL 0xfffffff8 + +static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter) +{ + struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; + struct device *dev = &adapter->pdev->dev; + int timeout, count, ret = 0; + u32 signature; + + /* Set timeout values with extra 2 seconds of buffer */ + switch (adapter->ahw->post_mode) { + case QLC_83XX_POST_FAST_MODE: + timeout = QLC_83XX_POST_FAST_MODE_TIMEOUT + 2000; + break; + case QLC_83XX_POST_MEDIUM_MODE: + timeout = QLC_83XX_POST_MED_MODE_TIMEOUT + 2000; + break; + case QLC_83XX_POST_SLOW_MODE: + timeout = QLC_83XX_POST_SLOW_MODE_TIMEOUT + 2000; + break; + default: + return -EINVAL; + } + + strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME, + QLC_FW_FILE_NAME_LEN); + + ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev); + if (ret) { + dev_err(dev, "POST firmware cannot be loaded, skipping POST\n"); + return 0; + } + + ret = qlcnic_83xx_copy_fw_file(adapter); + if (ret) + return ret; + + /* clear QLC_83XX_POST_SIGNATURE_REG register */ + qlcnic_ind_wr(adapter, QLC_83XX_POST_SIGNATURE_REG, 0); + + /* Set POST mode */ + qlcnic_ind_wr(adapter, QLC_83XX_POST_MODE_REG, + adapter->ahw->post_mode); + + QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, + QLC_83XX_BOOT_FROM_FILE); + + qlcnic_83xx_start_hw(adapter); + + count = 0; + do { + msleep(100); + count += 100; + + signature = qlcnic_ind_rd(adapter, QLC_83XX_POST_SIGNATURE_REG); + if (signature == QLC_83XX_POST_PASS) + break; + } while (timeout > count); + + if (timeout <= count) { + dev_err(dev, "POST timed out, signature = 0x%08x\n", signature); + return -EIO; + } + + switch (signature) { + case QLC_83XX_POST_PASS: + dev_info(dev, "POST passed, Signature = 0x%08x\n", signature); + break; + case QLC_83XX_POST_ASIC_STRESS_TEST_FAIL: + dev_err(dev, "POST failed, Test case : ASIC STRESS TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + case QLC_83XX_POST_DDR_TEST_FAIL: + dev_err(dev, "POST failed, Test case : DDR TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + case QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL: + dev_err(dev, "POST failed, Test case : ASIC MEMORY TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + case QLC_83XX_POST_FLASH_TEST_FAIL: + dev_err(dev, "POST failed, Test case : FLASH TEST, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + default: + dev_err(dev, "POST failed, Test case : INVALID, Signature = 0x%08x\n", + signature); + ret = -EIO; + break; + } + + return ret; +} + static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter) { struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info; @@ -2119,8 +2234,27 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter) if (qlcnic_83xx_copy_bootloader(adapter)) return err; + + /* Check if POST needs to be run */ + if (adapter->ahw->run_post) { + err = qlcnic_83xx_run_post(adapter); + if (err) + return err; + + /* No need to run POST in 
next reset sequence */ + adapter->ahw->run_post = false; + + /* Again reset the adapter to load regular firmware */ + qlcnic_83xx_stop_hw(adapter); + qlcnic_83xx_init_hw(adapter); + + err = qlcnic_83xx_copy_bootloader(adapter); + if (err) + return err; + } + /* Boot either flash image or firmware image from host file system */ - if (qlcnic_load_fw_file) { + if (qlcnic_load_fw_file == 1) { if (qlcnic_83xx_load_fw_image_from_host(adapter)) return err; } else { @@ -2284,6 +2418,7 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter) fw_info = ahw->fw_info; switch (pdev->device) { case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE8830: strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME, QLC_FW_FILE_NAME_LEN); break; @@ -2328,6 +2463,25 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) adapter->rx_mac_learn = false; ahw->msix_supported = !!qlcnic_use_msi_x; + /* Check if POST needs to be run */ + switch (qlcnic_load_fw_file) { + case 2: + ahw->post_mode = QLC_83XX_POST_FAST_MODE; + ahw->run_post = true; + break; + case 3: + ahw->post_mode = QLC_83XX_POST_MEDIUM_MODE; + ahw->run_post = true; + break; + case 4: + ahw->post_mode = QLC_83XX_POST_SLOW_MODE; + ahw->run_post = true; + break; + default: + ahw->run_post = false; + break; + } + qlcnic_83xx_init_rings(adapter); err = qlcnic_83xx_init_mailbox_work(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 851cb4a80d50..8102673cb37f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) } return -EIO; } - msleep(1); + usleep_range(1000, 1500); } if (id_reg) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index c4262c23ed7c..be41e4c77b65 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -537,7 +537,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter) QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0); - msleep(1); + usleep_range(1000, 1500); QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); @@ -1198,7 +1198,7 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter) flashaddr += 8; } } - msleep(1); + usleep_range(1000, 1500); QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020); QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e); @@ -1295,7 +1295,7 @@ next: rc = qlcnic_validate_firmware(adapter); if (rc != 0) { release_firmware(adapter->fw); - msleep(1); + usleep_range(1000, 1500); goto next; } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index e45bf09af0c9..18e5de72e9b4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -1753,7 +1753,7 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter, if (qlcnic_encap_length(sts_data[1]) && skb->ip_summed == CHECKSUM_UNNECESSARY) { - skb->encapsulation = 1; + skb->csum_level = 1; adapter->stats.encap_rx_csummed++; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index cf08b2de071e..f5e29f7bdae3 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -52,7 +52,7 @@ MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); int qlcnic_load_fw_file; -MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); +MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file, 2=POST in fast mode, 3=POST in medium mode, 4=POST in slow mode)"); module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); @@ -111,6 +111,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) static const struct pci_device_id qlcnic_pci_tbl[] = { ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), + ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X), ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X), ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X), @@ -228,6 +229,11 @@ static const struct qlcnic_board_info qlcnic_boards[] = { PCI_DEVICE_ID_QLOGIC_QLE834X, 0x0, 0x0, "8300 Series 1/10GbE Controller" }, { PCI_VENDOR_ID_QLOGIC, + PCI_DEVICE_ID_QLOGIC_QLE8830, + 0x0, + 0x0, + "8830 Series 1/10GbE Controller" }, + { PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_QLE824X, PCI_VENDOR_ID_QLOGIC, 0x203, @@ -1131,6 +1137,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar) *bar = QLCNIC_82XX_BAR0_LENGTH; break; case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE8830: case PCI_DEVICE_ID_QLOGIC_QLE844X: case PCI_DEVICE_ID_QLOGIC_VF_QLE834X: case PCI_DEVICE_ID_QLOGIC_VF_QLE844X: @@ -2474,6 +2481,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ahw->reg_tbl = (u32 *) qlcnic_reg_tbl; break; case PCI_DEVICE_ID_QLOGIC_QLE834X: + case PCI_DEVICE_ID_QLOGIC_QLE8830: case PCI_DEVICE_ID_QLOGIC_QLE844X: qlcnic_83xx_register_map(ahw); break; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 3e96f269150d..6c904a6cad2a 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1922,7 +1922,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, sbq_desc->p.skb = NULL; skb_reserve(skb, NET_IP_ALIGN); } - while (length > 0) { + do { lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); size = (length < rx_ring->lbq_buf_size) ? 
length : rx_ring->lbq_buf_size; @@ -1939,7 +1939,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, skb->truesize += size; length -= size; i++; - } + } while (length > 0); ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, &hlen); __pskb_pull_tail(skb, hlen); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 0921302553c6..1d81238fcb93 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -52,6 +52,10 @@ #define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" #define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" #define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" #ifdef RTL8169_DEBUG #define assert(expr) \ @@ -147,6 +151,10 @@ enum mac_version { RTL_GIGA_MAC_VER_42, RTL_GIGA_MAC_VER_43, RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, RTL_GIGA_MAC_NONE = 0xff, }; @@ -282,6 +290,18 @@ static const struct { [RTL_GIGA_MAC_VER_44] = _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_45] = + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_46] = + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_47] = + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, + JUMBO_1K, false), + [RTL_GIGA_MAC_VER_48] = + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, + JUMBO_1K, false), }; #undef _R @@ -410,6 +430,7 @@ enum rtl8168_8101_registers { #define EPHYAR_DATA_MASK 0xffff DLLPR = 0xd0, #define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) DBG_REG = 0xd1, #define FIX_NAK_1 (1 << 4) #define FIX_NAK_2 (1 << 3) @@ -429,6 +450,8 @@ enum rtl8168_8101_registers { #define EFUSEAR_REG_MASK 0x03ff #define EFUSEAR_REG_SHIFT 8 #define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) }; enum rtl8168_registers { @@ -447,6 +470,7 @@ #define ERIAR_MASK_SHIFT 12 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) EPHY_RXER_NUM = 0x7c, @@ -598,6 +622,9 @@ enum rtl_register_content { /* DumpCounterCommand */ CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when it receives a Magic Packet */ }; enum rtl_desc_bit { @@ -823,6 +850,10 @@ MODULE_FIRMWARE(FIRMWARE_8106E_1); MODULE_FIRMWARE(FIRMWARE_8106E_2); MODULE_FIRMWARE(FIRMWARE_8168G_2); MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); static void rtl_lock_work(struct rtl8169_private *tp) { @@ -1514,8 +1545,17 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) options = RTL_R8(Config3); if (options & LinkUp) wolopts |= WAKE_PHY; - if (options & MagicPacket) - wolopts |= WAKE_MAGIC; + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) + wolopts |= WAKE_MAGIC; + break; + default: + if (options & MagicPacket) + wolopts |= WAKE_MAGIC; + break; + } options = RTL_R8(Config5); if (options & UWF) @@ -1543,24 +1583,48 @@ static 
void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) { void __iomem *ioaddr = tp->mmio_addr; - unsigned int i; + unsigned int i, tmp; static const struct { u32 opt; u16 reg; u8 mask; } cfg[] = { { WAKE_PHY, Config3, LinkUp }, - { WAKE_MAGIC, Config3, MagicPacket }, { WAKE_UCAST, Config5, UWF }, { WAKE_BCAST, Config5, BWF }, { WAKE_MCAST, Config5, MWF }, - { WAKE_ANY, Config5, LanWake } + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } }; u8 options; RTL_W8(Cfg9346, Cfg9346_Unlock); - for (i = 0; i < ARRAY_SIZE(cfg); i++) { + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + tmp = ARRAY_SIZE(cfg) - 1; + if (wolopts & WAKE_MAGIC) + rtl_w1w0_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + MagicPacket_v2, + 0x0000, + ERIAR_EXGMAC); + else + rtl_w1w0_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + 0x0000, + MagicPacket_v2, + ERIAR_EXGMAC); + break; + default: + tmp = ARRAY_SIZE(cfg); + break; + } + + for (i = 0; i < tmp; i++) { options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; if (wolopts & cfg[i].opt) options |= cfg[i].mask; @@ -2045,6 +2109,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, u32 val; int mac_version; } mac_info[] = { + /* 8168H family. */ + { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 }, + { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 }, + /* 8168G family. */ { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, @@ -2140,6 +2208,14 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, tp->mac_version = tp->mii.supports_gmii ? RTL_GIGA_MAC_VER_42 : RTL_GIGA_MAC_VER_43; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { + tp->mac_version = tp->mii.supports_gmii ? + RTL_GIGA_MAC_VER_45 : + RTL_GIGA_MAC_VER_47; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { + tp->mac_version = tp->mii.supports_gmii ? 
+ RTL_GIGA_MAC_VER_46 : + RTL_GIGA_MAC_VER_48; } } @@ -3465,6 +3541,189 @@ static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) rtl_apply_firmware(tp); } +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) +{ + u16 dout_tapbin; + u32 data; + + rtl_apply_firmware(tp); + + /* CHN EST parameters adjust - giga master */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x809b); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0xf800); + rtl_writephy(tp, 0x13, 0x80a2); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0xff00); + rtl_writephy(tp, 0x13, 0x80a4); + rtl_w1w0_phy(tp, 0x14, 0x8500, 0xff00); + rtl_writephy(tp, 0x13, 0x809c); + rtl_w1w0_phy(tp, 0x14, 0xbd00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - giga slave */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80ad); + rtl_w1w0_phy(tp, 0x14, 0x7000, 0xf800); + rtl_writephy(tp, 0x13, 0x80b4); + rtl_w1w0_phy(tp, 0x14, 0x5000, 0xff00); + rtl_writephy(tp, 0x13, 0x80ac); + rtl_w1w0_phy(tp, 0x14, 0x4000, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - fnet */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x808e); + rtl_w1w0_phy(tp, 0x14, 0x1200, 0xff00); + rtl_writephy(tp, 0x13, 0x8090); + rtl_w1w0_phy(tp, 0x14, 0xe500, 0xff00); + rtl_writephy(tp, 0x13, 0x8092); + rtl_w1w0_phy(tp, 0x14, 0x9f00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + rtl_writephy(tp, 0x1f, 0x0a46); + data = rtl_readphy(tp, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = rtl_readphy(tp, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin^0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x827a); + rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827b); + rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827c); + rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827d); + rtl_w1w0_phy(tp, 0x14, dout_tapbin, 0xf000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* SAR ADC performance */ + rtl_writephy(tp, 0x1f, 0x0bca); + rtl_w1w0_phy(tp, 0x17, 0x4000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x803f); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8047); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x804f); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8057); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x805f); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8067); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x806f); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} 
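/* Editor's note (added): the two fixup routines around this point use a
 * common access pattern: PHY register 0x1f selects a page, register
 * 0x13 selects an extended register within page 0x0a43, and
 * rtl_w1w0_phy() performs a read-modify-write that first ORs in the
 * "write 1" mask and then clears the "write 0" mask. A minimal sketch
 * of that helper, assuming the rtl_readphy()/rtl_writephy() accessors
 * used throughout this file (illustration only, not necessarily the
 * driver's exact implementation):
 */
static void sketch_w1w0_phy(struct rtl8169_private *tp, int reg,
			    int ones, int zeros)
{
	int val = rtl_readphy(tp, reg);	/* current register contents */

	/* write the 1s first, then the 0s, as the name w1w0 suggests */
	rtl_writephy(tp, reg, (val | ones) & ~zeros);
}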
+ +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) +{ + u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0; + u16 rlen; + u32 data; + + rtl_apply_firmware(tp); + + /* CHIN EST parameter update */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x808a); + rtl_w1w0_phy(tp, 0x14, 0x000a, 0x003f); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable R-tune & PGA-retune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w1w0_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w1w0_phy(tp, 0x16, 0x0002, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w1w0_phy(tp, 0x11, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data = r8168_mac_ocp_read(tp, 0xdd02); + ioffset_p3 = ((data & 0x80)>>7); + ioffset_p3 <<= 3; + + data = r8168_mac_ocp_read(tp, 0xdd00); + ioffset_p3 |= ((data & (0xe000))>>13); + ioffset_p2 = ((data & (0x1e00))>>9); + ioffset_p1 = ((data & (0x01e0))>>5); + ioffset_p0 = ((data & 0x0010)>>4); + ioffset_p0 <<= 3; + ioffset_p0 |= (data & (0x07)); + data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); + + if ((ioffset_p3 != 0x0F) || (ioffset_p2 != 0x0F) || + (ioffset_p1 != 0x0F) || (ioffset_p0 == 0x0F)) { + rtl_writephy(tp, 0x1f, 0x0bcf); + rtl_writephy(tp, 0x16, data); + rtl_writephy(tp, 0x1f, 0x0000); + } + + /* Modify rlen (TX LPF corner frequency) level */ + rtl_writephy(tp, 0x1f, 0x0bcd); + data = rtl_readphy(tp, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12); + rtl_writephy(tp, 0x17, data); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x1f, 0x0000); + + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -3655,6 +3914,14 @@ static void rtl_hw_phy_config(struct net_device *dev) case RTL_GIGA_MAC_VER_44: rtl8168g_2_hw_phy_config(tp); break; + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_47: + rtl8168h_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_48: + rtl8168h_2_hw_phy_config(tp); + break; case RTL_GIGA_MAC_VER_41: default: @@ -3866,6 +4133,10 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: ops->write = r8168g_mdio_write; ops->read = r8168g_mdio_read; break; @@ -3920,6 +4191,10 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -3988,6 +4263,10 @@ static void r810x_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_13: case RTL_GIGA_MAC_VER_16: break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0); + break; 
default: RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); break; @@ -4088,6 +4367,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_31: case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); break; case RTL_GIGA_MAC_VER_40: @@ -4112,6 +4393,10 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_33: RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); break; + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xC0); + break; case RTL_GIGA_MAC_VER_40: case RTL_GIGA_MAC_VER_41: rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, @@ -4154,6 +4439,8 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_37: case RTL_GIGA_MAC_VER_39: case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: ops->down = r810x_pll_power_down; ops->up = r810x_pll_power_up; break; @@ -4183,6 +4470,8 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_41: case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: ops->down = r8168_pll_power_down; ops->up = r8168_pll_power_up; break; @@ -4233,6 +4522,10 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: @@ -4394,6 +4687,10 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: default: ops->disable = NULL; ops->enable = NULL; @@ -4496,15 +4793,19 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) tp->mac_version == RTL_GIGA_MAC_VER_31) { rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || - tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_36 || - tp->mac_version == RTL_GIGA_MAC_VER_37 || - tp->mac_version == RTL_GIGA_MAC_VER_40 || - tp->mac_version == RTL_GIGA_MAC_VER_41 || - tp->mac_version == RTL_GIGA_MAC_VER_42 || - tp->mac_version == RTL_GIGA_MAC_VER_43 || - tp->mac_version == RTL_GIGA_MAC_VER_44 || - tp->mac_version == RTL_GIGA_MAC_VER_38) { + tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_38 || + tp->mac_version == RTL_GIGA_MAC_VER_40 || + tp->mac_version == RTL_GIGA_MAC_VER_41 || + tp->mac_version == RTL_GIGA_MAC_VER_42 || + tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || + tp->mac_version == RTL_GIGA_MAC_VER_45 || + tp->mac_version == RTL_GIGA_MAC_VER_46 || + tp->mac_version == RTL_GIGA_MAC_VER_47 || + tp->mac_version == RTL_GIGA_MAC_VER_48) { RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); } else { @@ -5331,6 +5632,105 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); } +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + 
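/* Editor's note (added): further down, this function derives a 1 ms
 * tick for the MAC from the PHY's saw counter: sw_cnt_1ms_ini =
 * 16000000 / rg_saw_cnt, truncated to 12 bits and merged into the low
 * bits of OCP register 0xd412. For a hypothetical reading of
 * rg_saw_cnt == 16128, 16000000 / 16128 = 992 = 0x3e0.
 */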
u16 rg_saw_cnt; + u32 data; + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x154a }, + { 0x01, 0xffff, 0x068b } + }; + + /* disable aspm and clock request before accessing ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC); + + rtl_w1w0_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); + + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); + + rtl_writephy(tp, 0x1f, 0x0c42); + rg_saw_cnt = rtl_readphy(tp, 0x13); + rtl_writephy(tp, 0x1f, 0x0000); + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + data = r8168_mac_ocp_read(tp, 0xd412); + data &= 0x0fff; + data |= sw_cnt_1ms_ini; + r8168_mac_ocp_write(tp, 0xd412, data); + } + + data = r8168_mac_ocp_read(tp, 0xe056); + data &= 0xf0; + data |= 0x07; + r8168_mac_ocp_write(tp, 0xe056, data); + + data = r8168_mac_ocp_read(tp, 0xe052); + data &= 0x8008; + data |= 0x6000; + r8168_mac_ocp_write(tp, 0xe052, data); + + data = r8168_mac_ocp_read(tp, 0xe0d6); + data &= 0x01ff; + data |= 0x017f; + r8168_mac_ocp_write(tp, 0xe0d6, data); + + data = r8168_mac_ocp_read(tp, 0xd420); + data &= 0x0fff; + data |= 0x047f; + r8168_mac_ocp_write(tp, 0xd420, data); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); +} + static void rtl_hw_start_8168(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -5441,6 +5841,11 @@ static void rtl_hw_start_8168(struct net_device *dev) rtl_hw_start_8411_2(tp); break; + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + rtl_hw_start_8168h_1(tp); + break; + default: printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", dev->name, tp->mac_version); @@ -5656,6 +6061,10 @@ static void rtl_hw_start_8101(struct net_device *dev) case RTL_GIGA_MAC_VER_43: rtl_hw_start_8168g_2(tp); break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + rtl_hw_start_8168h_1(tp); + break; } RTL_W8(Cfg9346, Cfg9346_Lock); @@ -5896,7 
+6305,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, { struct skb_shared_info *info = skb_shinfo(skb); unsigned int cur_frag, entry; - struct TxDesc * uninitialized_var(txd); + struct TxDesc *uninitialized_var(txd); struct device *d = &tp->pci_dev->dev; entry = tp->cur_tx; @@ -7111,6 +7520,10 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_42: case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; @@ -7248,8 +7661,19 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) RTL_W8(Cfg9346, Cfg9346_Unlock); RTL_W8(Config1, RTL_R8(Config1) | PMEnable); RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); - if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) - tp->features |= RTL_FEATURE_WOL; + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) + tp->features |= RTL_FEATURE_WOL; + if ((RTL_R8(Config3) & LinkUp) != 0) + tp->features |= RTL_FEATURE_WOL; + break; + default: + if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) + tp->features |= RTL_FEATURE_WOL; + break; + } if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) tp->features |= RTL_FEATURE_WOL; tp->features |= rtl_try_msi(tp, cfg); @@ -7276,6 +7700,18 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u64_stats_init(&tp->tx_stats.syncp); /* Get MAC address */ + if (tp->mac_version == RTL_GIGA_MAC_VER_45 || + tp->mac_version == RTL_GIGA_MAC_VER_46 || + tp->mac_version == RTL_GIGA_MAC_VER_47 || + tp->mac_version == RTL_GIGA_MAC_VER_48) { + u16 mac_addr[3]; + + *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xE0, ERIAR_EXGMAC); + *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xE4, ERIAR_EXGMAC); + + if (is_valid_ether_addr((u8 *)mac_addr)) + rtl_rar_set(tp, (u8 *)mac_addr); + } for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = RTL_R8(MAC0 + i); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 65c220f8661d..320609842211 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -78,7 +78,7 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, if (buffer->flags & EFX_TX_BUF_SKB) { (*pkts_compl)++; (*bytes_compl) += buffer->skb->len; - dev_kfree_skb_any((struct sk_buff *) buffer->skb); + dev_consume_skb_any((struct sk_buff *)buffer->skb); netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, "TX queue %d transmission id %x complete\n", tx_queue->queue, tx_queue->read_count); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index ec632e666c56..3aad413e74b4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -17,6 +17,7 @@ #include <linux/mfd/syscon.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/regmap.h> @@ -30,6 +31,12 @@ #define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 +#define EMAC_SPLITTER_CTRL_REG 0x0 +#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 +#define EMAC_SPLITTER_CTRL_SPEED_10 0x2 +#define EMAC_SPLITTER_CTRL_SPEED_100 0x3 +#define EMAC_SPLITTER_CTRL_SPEED_1000 0x0 + struct socfpga_dwmac { int interface; u32 reg_offset; @@ -37,14 +44,46 @@ 
struct socfpga_dwmac { struct device *dev; struct regmap *sys_mgr_base_addr; struct reset_control *stmmac_rst; + void __iomem *splitter_base; }; +static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) +{ + struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; + void __iomem *splitter_base = dwmac->splitter_base; + u32 val; + + if (!splitter_base) + return; + + val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); + val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK; + + switch (speed) { + case 1000: + val |= EMAC_SPLITTER_CTRL_SPEED_1000; + break; + case 100: + val |= EMAC_SPLITTER_CTRL_SPEED_100; + break; + case 10: + val |= EMAC_SPLITTER_CTRL_SPEED_10; + break; + default: + return; + } + + writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); +} + static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) { struct device_node *np = dev->of_node; struct regmap *sys_mgr_base_addr; u32 reg_offset, reg_shift; int ret; + struct device_node *np_splitter; + struct resource res_splitter; dwmac->stmmac_rst = devm_reset_control_get(dev, STMMAC_RESOURCE_NAME); @@ -73,6 +112,20 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * return -EINVAL; } + np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0); + if (np_splitter) { + if (of_address_to_resource(np_splitter, 0, &res_splitter)) { + dev_info(dev, "Missing emac splitter address\n"); + return -EINVAL; + } + + dwmac->splitter_base = devm_ioremap_resource(dev, &res_splitter); + if (IS_ERR(dwmac->splitter_base)) { + dev_info(dev, "Failed to mapping emac splitter\n"); + return PTR_ERR(dwmac->splitter_base); + } + } + dwmac->reg_offset = reg_offset; dwmac->reg_shift = reg_shift; dwmac->sys_mgr_base_addr = sys_mgr_base_addr; @@ -91,6 +144,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) switch (phymode) { case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII; break; case PHY_INTERFACE_MODE_MII: @@ -102,6 +156,13 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) return -EINVAL; } + /* Overwrite val to GMII if splitter core is enabled. The phymode here + * is the actual phy mode on phy hardware, but phy interface from + * EMAC core is GMII. 
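[Annotation] Apart from the speed fix-up, socfpga_dwmac_parse_data() above shows the standard pattern for an optional device-tree phandle: a missing "altr,emac-splitter" property quietly disables the feature, while a present but unusable one is a hard error. Condensed from the hunk above:

np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0);
if (np_splitter) {
	if (of_address_to_resource(np_splitter, 0, &res_splitter))
		return -EINVAL;			/* present but unusable */
	dwmac->splitter_base = devm_ioremap_resource(dev, &res_splitter);
	if (IS_ERR(dwmac->splitter_base))
		return PTR_ERR(dwmac->splitter_base);
}
/* splitter_base left NULL makes socfpga_dwmac_fix_mac_speed() a no-op */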
+ */ + if (dwmac->splitter_base) + val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; + regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); ctrl |= val << reg_shift; @@ -196,4 +257,5 @@ const struct stmmac_of_data socfpga_gmac_data = { .setup = socfpga_dwmac_probe, .init = socfpga_dwmac_init, .exit = socfpga_dwmac_exit, + .fix_mac_speed = socfpga_dwmac_fix_mac_speed, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index cf4f38db1c0a..3a08a1f78c73 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -261,11 +261,11 @@ static int stmmac_ethtool_getsettings(struct net_device *dev, ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed); /* Get and convert ADV/LP_ADV from the HW AN registers */ - if (priv->hw->mac->get_adv) - priv->hw->mac->get_adv(priv->hw, &adv); - else + if (!priv->hw->mac->get_adv) return -EOPNOTSUPP; /* should never happen indeed */ + priv->hw->mac->get_adv(priv->hw, &adv); + /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ if (adv.pause & STMMAC_PCS_PAUSE) @@ -340,19 +340,17 @@ static int stmmac_ethtool_setsettings(struct net_device *dev, if (cmd->autoneg != AUTONEG_ENABLE) return -EINVAL; - if (cmd->autoneg == AUTONEG_ENABLE) { - mask &= (ADVERTISED_1000baseT_Half | + mask &= (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full); - spin_lock(&priv->lock); - if (priv->hw->mac->ctrl_ane) - priv->hw->mac->ctrl_ane(priv->hw, 1); - spin_unlock(&priv->lock); - } + spin_lock(&priv->lock); + if (priv->hw->mac->ctrl_ane) + priv->hw->mac->ctrl_ane(priv->hw, 1); + spin_unlock(&priv->lock); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 6e6ee226de04..9dbb02d9d9c2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -834,7 +834,7 @@ static int stmmac_init_phy(struct net_device *dev) /* Stop Advertising 1000BASE Capability if interface is not GMII */ if ((interface == PHY_INTERFACE_MODE_MII) || (interface == PHY_INTERFACE_MODE_RMII) || - (max_speed < 1000 && max_speed > 0)) + (max_speed < 1000 && max_speed > 0)) phydev->advertising &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index a5b1e1b776fe..b735fa22ac95 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -253,7 +253,7 @@ int stmmac_mdio_register(struct net_device *ndev) } /* - * If we're going to bind the MAC to this PHY bus, + * If we're going to bind the MAC to this PHY bus, * and no PHY number was provided to the MAC, * use the one probed here. 
*/ @@ -282,7 +282,7 @@ int stmmac_mdio_register(struct net_device *ndev) } if (!found) { - pr_warning("%s: No PHY found\n", ndev->name); + pr_warn("%s: No PHY found\n", ndev->name); mdiobus_unregister(new_bus); mdiobus_free(new_bus); return -ENODEV; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 37f87ff28f03..02d370e58110 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4962,7 +4962,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_cmd |= PCI_COMMAND_PARITY; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); if (pci_try_set_mwi(pdev)) - pr_warning("Could not enable MWI for %s\n", pci_name(pdev)); + pr_warn("Could not enable MWI for %s\n", pci_name(pdev)); cas_program_bridge(pdev); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 8216be46540f..904fd1ab5f6e 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -8717,8 +8717,8 @@ static void niu_divide_channels(struct niu_parent *parent, parent->txchan_per_port[i] = 1; } if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) { - pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", - parent->index, tot_rx, tot_tx); + pr_warn("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n", + parent->index, tot_rx, tot_tx); } } diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index f7415b6bf141..fef5dec2cffe 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -115,7 +115,7 @@ static const struct pci_device_id gem_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, gem_pci_tbl); -static u16 __phy_read(struct gem *gp, int phy_addr, int reg) +static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg) { u32 cmd; int limit = 10000; @@ -141,18 +141,18 @@ static u16 __phy_read(struct gem *gp, int phy_addr, int reg) return cmd & MIF_FRAME_DATA; } -static inline int _phy_read(struct net_device *dev, int mii_id, int reg) +static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg) { struct gem *gp = netdev_priv(dev); - return __phy_read(gp, mii_id, reg); + return __sungem_phy_read(gp, mii_id, reg); } -static inline u16 phy_read(struct gem *gp, int reg) +static inline u16 sungem_phy_read(struct gem *gp, int reg) { - return __phy_read(gp, gp->mii_phy_addr, reg); + return __sungem_phy_read(gp, gp->mii_phy_addr, reg); } -static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) +static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val) { u32 cmd; int limit = 10000; @@ -174,15 +174,15 @@ static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val) } } -static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) +static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val) { struct gem *gp = netdev_priv(dev); - __phy_write(gp, mii_id, reg, val & 0xffff); + __sungem_phy_write(gp, mii_id, reg, val & 0xffff); } -static inline void phy_write(struct gem *gp, int reg, u16 val) +static inline void sungem_phy_write(struct gem *gp, int reg, u16 val) { - __phy_write(gp, gp->mii_phy_addr, reg, val); + __sungem_phy_write(gp, gp->mii_phy_addr, reg, val); } static inline void gem_enable_ints(struct gem *gp) @@ -1687,9 +1687,9 @@ static void gem_init_phy(struct gem *gp) /* Some PHYs used by apple have problem getting back to us, * we do an additional reset here */ - phy_write(gp, MII_BMCR, 
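[Annotation] The stmmac, cassini and niu hunks above (and tilepro below) belong to the tree-wide replacement of the deprecated pr_warning() alias with pr_warn(). The idiomatic pairing, shown as a sketch, adds a pr_fmt() prefix ahead of the printk includes so every call site stays short:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* before any #include */

pr_warn("No PHY found\n");	/* emits e.g. "stmmac: No PHY found" */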
BMCR_RESET); + sungem_phy_write(gp, MII_BMCR, BMCR_RESET); msleep(20); - if (phy_read(gp, MII_BMCR) != 0xffff) + if (sungem_phy_read(gp, MII_BMCR) != 0xffff) break; if (i == 2) netdev_warn(gp->dev, "GMAC PHY not responding !\n"); @@ -2012,7 +2012,7 @@ static int gem_check_invariants(struct gem *gp) for (i = 0; i < 32; i++) { gp->mii_phy_addr = i; - if (phy_read(gp, MII_BMCR) != 0xffff) + if (sungem_phy_read(gp, MII_BMCR) != 0xffff) break; } if (i == 32) { @@ -2696,13 +2696,13 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Fallthrough... */ case SIOCGMIIREG: /* Read MII PHY register. */ - data->val_out = __phy_read(gp, data->phy_id & 0x1f, + data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f, data->reg_num & 0x1f); rc = 0; break; case SIOCSMIIREG: /* Write MII PHY register. */ - __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, + __sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); rc = 0; break; @@ -2933,8 +2933,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Fill up the mii_phy structure (even if we won't use it) */ gp->phy_mii.dev = dev; - gp->phy_mii.mdio_read = _phy_read; - gp->phy_mii.mdio_write = _phy_write; + gp->phy_mii.mdio_read = _sungem_phy_read; + gp->phy_mii.mdio_write = _sungem_phy_write; #ifdef CONFIG_PPC_PMAC gp->phy_mii.platform_data = gp->of_node; #endif diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index f67539650c38..edb860947da4 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -37,6 +37,8 @@ MODULE_VERSION(DRV_MODULE_VERSION); */ #define VNET_MAX_RETRIES 10 +static int __vnet_tx_trigger(struct vnet_port *port, u32 start); + /* Ordered from largest major to lowest */ static struct vio_version vnet_versions[] = { { .major = 1, .minor = 0 }, @@ -283,10 +285,18 @@ static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, port->raddr[0], port->raddr[1], port->raddr[2], port->raddr[3], port->raddr[4], port->raddr[5]); - err = -ECONNRESET; + break; } } while (err == -EAGAIN); + if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { + port->stop_rx_idx = end; + port->stop_rx = true; + } else { + port->stop_rx_idx = 0; + port->stop_rx = false; + } + return err; } @@ -451,7 +461,7 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) struct net_device *dev; struct vnet *vp; u32 end; - + struct vio_net_desc *desc; if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) return 0; @@ -459,7 +469,24 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) if (unlikely(!idx_is_pending(dr, end))) return 0; + /* sync for race conditions with vnet_start_xmit() and tell xmit it + * is time to send a trigger. + */ dr->cons = next_idx(end, dr); + desc = vio_dring_entry(dr, dr->cons); + if (desc->hdr.state == VIO_DESC_READY && port->start_cons) { + /* vnet_start_xmit() just populated this dring but missed + * sending the "start" LDC message to the consumer. + * Send a "start" trigger on its behalf. 
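[Annotation] The vnet_ack() change above pairs with the vnet_start_xmit() change below; both sides together implement a single-trigger handshake. A condensed view (sketch, not driver code):

/*
 *   vnet_start_xmit()                        peer (consumer)
 *   -----------------                        ---------------
 *   mark descriptor VIO_DESC_READY
 *   if (port->start_cons)
 *           __vnet_tx_trigger(dr->cons)  ->  starts draining the ring
 *   port->start_cons = false
 *                                        <-  VIO_DRING_STOPPED when idle
 *   vnet_ack(): set start_cons = true, or, if a racing xmit already
 *   marked a descriptor READY, send the missed "start" trigger itself.
 */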
+ */ + if (__vnet_tx_trigger(port, dr->cons) > 0) + port->start_cons = false; + else + port->start_cons = true; + } else { + port->start_cons = true; + } + vp = port->vp; dev = vp->dev; @@ -537,7 +564,7 @@ static void vnet_event(void *arg, int event) } if (unlikely(event != LDC_EVENT_DATA_READY)) { - pr_warning("Unexpected LDC event %d\n", event); + pr_warn("Unexpected LDC event %d\n", event); spin_unlock_irqrestore(&vio->lock, flags); return; } @@ -600,7 +627,7 @@ static void vnet_event(void *arg, int event) local_irq_restore(flags); } -static int __vnet_tx_trigger(struct vnet_port *port) +static int __vnet_tx_trigger(struct vnet_port *port, u32 start) { struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct vio_dring_data hdr = { @@ -611,12 +638,21 @@ static int __vnet_tx_trigger(struct vnet_port *port) .sid = vio_send_sid(&port->vio), }, .dring_ident = dr->ident, - .start_idx = dr->prod, + .start_idx = start, .end_idx = (u32) -1, }; int err, delay; int retries = 0; + if (port->stop_rx) { + err = vnet_send_ack(port, + &port->vio.drings[VIO_DRIVER_RX_RING], + port->stop_rx_idx, -1, + VIO_DRING_STOPPED); + if (err <= 0) + return err; + } + hdr.seq = dr->snd_nxt; delay = 1; do { @@ -737,7 +773,30 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) d->hdr.state = VIO_DESC_READY; - err = __vnet_tx_trigger(port); + /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent + * to notify the consumer that some descriptors are READY. + * After that "start" trigger, no additional triggers are needed until + * a DRING_STOPPED is received from the consumer. The dr->cons field + * (set up by vnet_ack()) has the value of the next dring index + * that has not yet been ack-ed. We send a "start" trigger here + * if, and only if, start_cons is true (reset it afterward). Conversely, + * vnet_ack() should check if the dring corresponding to cons + * is marked READY, but start_cons was false. + * If so, vnet_ack() should send out the missed "start" trigger. + * + * Note that the wmb() above makes sure the cookies et al. are + * not globally visible before the VIO_DESC_READY, and that the + * stores are ordered correctly by the compiler. The consumer will + * not proceed until the VIO_DESC_READY is visible assuring that + * the consumer does not observe anything related to descriptors + * out of order. 
The HV trap from the LDC start trigger is the + * producer to consumer announcement that work is available to the + * consumer + */ + if (!port->start_cons) + goto ldc_start_done; /* previous trigger suffices */ + + err = __vnet_tx_trigger(port, dr->cons); if (unlikely(err < 0)) { netdev_info(dev, "TX trigger error %d\n", err); d->hdr.state = VIO_DESC_FREE; @@ -745,6 +804,9 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) goto out_dropped_unlock; } +ldc_start_done: + port->start_cons = false; + dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; @@ -1038,6 +1100,7 @@ static int vnet_port_alloc_tx_bufs(struct vnet_port *port) (sizeof(struct ldc_trans_cookie) * 2)); dr->num_entries = VNET_TX_RING_SIZE; dr->prod = dr->cons = 0; + port->start_cons = true; /* need an initial trigger */ dr->pending = VNET_TX_RING_SIZE; dr->ncookies = ncookies; diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h index de5c2c64996f..da4933750d06 100644 --- a/drivers/net/ethernet/sun/sunvnet.h +++ b/drivers/net/ethernet/sun/sunvnet.h @@ -40,6 +40,10 @@ struct vnet_port { struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; struct list_head list; + + u32 stop_rx_idx; + bool stop_rx; + bool start_cons; }; static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index f9bcf7aa88ca..dd9430043536 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1207,7 +1207,6 @@ static int cpmac_remove(struct platform_device *pdev) static struct platform_driver cpmac_driver = { .driver = { .name = "cpmac", - .owner = THIS_MODULE, }, .probe = cpmac_probe, .remove = cpmac_remove, diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index aa8bf45e53dc..0ea78326cc21 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -211,7 +211,6 @@ static struct platform_driver cpsw_phy_sel_driver = { .probe = cpsw_phy_sel_probe, .driver = { .name = "cpsw-phy-sel", - .owner = THIS_MODULE, .of_match_table = cpsw_phy_sel_id_table, }, }; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index e2a00287f8eb..45ba50e4eaec 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -397,6 +397,8 @@ struct cpsw_priv { struct cpdma_ctlr *dma; struct cpdma_chan *txch, *rxch; struct cpsw_ale *ale; + bool rx_pause; + bool tx_pause; /* snapshot of IRQ numbers */ u32 irqs_table[4]; u32 num_irqs; @@ -855,6 +857,12 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, else if (phy->speed == 10) mac_control |= BIT(18); /* In Band mode */ + if (priv->rx_pause) + mac_control |= BIT(3); + + if (priv->tx_pause) + mac_control |= BIT(4); + *link = true; } else { mac_control = 0; @@ -1246,6 +1254,9 @@ static int cpsw_ndo_open(struct net_device *ndev) /* enable statistics collection only on all ports */ __raw_writel(0x7, &priv->regs->stat_port_en); + /* Enable internal fifo flow control */ + writel(0x7, &priv->regs->flow_control); + if (WARN_ON(!priv->data.rx_descs)) priv->data.rx_descs = 128; @@ -1807,6 +1818,30 @@ static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; } +static void cpsw_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + + pause->autoneg = AUTONEG_DISABLE; + pause->rx_pause = priv->rx_pause ? 
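[Annotation] Distilling the ordering contract spelled out in the long comment above into a producer-side sketch: the descriptor payload must become globally visible before its state word, which is exactly what the wmb() between the two stores guarantees.

d->cookies[0] = cookie;		/* payload: cookies, lengths, ... */
wmb();				/* payload visible before state */
d->hdr.state = VIO_DESC_READY;	/* consumer keys off this store */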
true : false; + pause->tx_pause = priv->tx_pause ? true : false; +} + +static int cpsw_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + bool link; + + priv->rx_pause = pause->rx_pause ? true : false; + priv->tx_pause = pause->tx_pause ? true : false; + + for_each_slave(priv, _cpsw_adjust_link, priv, &link); + + return 0; +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -1820,6 +1855,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .get_sset_count = cpsw_get_sset_count, .get_strings = cpsw_get_strings, .get_ethtool_stats = cpsw_get_ethtool_stats, + .get_pauseparam = cpsw_get_pauseparam, + .set_pauseparam = cpsw_set_pauseparam, .get_wol = cpsw_get_wol, .set_wol = cpsw_set_wol, .get_regs_len = cpsw_get_regs_len, @@ -2255,18 +2292,24 @@ static int cpsw_probe(struct platform_device *pdev) } while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { - for (i = res->start; i <= res->end; i++) { - if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0, - dev_name(&pdev->dev), priv)) { - dev_err(priv->dev, "error attaching irq\n"); - goto clean_ale_ret; - } - priv->irqs_table[k] = i; - priv->num_irqs = k + 1; + if (k >= ARRAY_SIZE(priv->irqs_table)) { + ret = -EINVAL; + goto clean_ale_ret; + } + + ret = devm_request_irq(&pdev->dev, res->start, cpsw_interrupt, + 0, dev_name(&pdev->dev), priv); + if (ret < 0) { + dev_err(priv->dev, "error attaching irq (%d)\n", ret); + goto clean_ale_ret; } + + priv->irqs_table[k] = res->start; k++; } + priv->num_irqs = k; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->netdev_ops = &cpsw_netdev_ops; @@ -2395,7 +2438,6 @@ MODULE_DEVICE_TABLE(of, cpsw_of_mtable); static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw", - .owner = THIS_MODULE, .pm = &cpsw_pm_ops, .of_match_table = cpsw_of_mtable, }, diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 35a139e9a833..ea712512c7d1 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -2083,7 +2083,6 @@ MODULE_DEVICE_TABLE(of, davinci_emac_of_match); static struct platform_driver davinci_emac_driver = { .driver = { .name = "davinci_emac", - .owner = THIS_MODULE, .pm = &davinci_emac_pm_ops, .of_match_table = of_match_ptr(davinci_emac_of_match), }, diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 2791f6f2db11..98655b44b97e 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -481,7 +481,6 @@ MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); static struct platform_driver davinci_mdio_driver = { .driver = { .name = "davinci_mdio", - .owner = THIS_MODULE, .pm = &davinci_mdio_pm_ops, .of_match_table = of_match_ptr(davinci_mdio_of_mtable), }, diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 88c712126692..88818d5054ab 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -956,7 +956,7 @@ static int tile_net_open_aux(struct net_device *dev) */ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) { - pr_warning("Failed to start LIPP/LEPP.\n"); + pr_warn("Failed to start LIPP/LEPP\n"); return -EIO; } @@ -2399,8 +2399,7 @@ static int __init network_cpus_setup(char *str) { int rc = cpulist_parse_crop(str, 
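[Annotation] The pauseparam hooks added in the cpsw hunk above map directly onto two MAC control bits programmed by _cpsw_adjust_link(), which the patch writes as bare BIT(3)/BIT(4). Named for readability (these names are hypothetical, not taken from the TRM):

#define CPSW_MAC_CONTROL_RX_FLOW_EN	BIT(3)	/* honour received pause frames */
#define CPSW_MAC_CONTROL_TX_FLOW_EN	BIT(4)	/* generate pause frames */

From userspace this is exercised with "ethtool -A eth0 rx on tx on"; note the hooks report autoneg as AUTONEG_DISABLE, so pause is forced rather than negotiated.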
&network_cpus_map); if (rc != 0) { - pr_warning("network_cpus=%s: malformed cpu list\n", - str); + pr_warn("network_cpus=%s: malformed cpu list\n", str); } else { /* Remove dedicated cpus. */ @@ -2409,8 +2408,7 @@ static int __init network_cpus_setup(char *str) if (cpumask_empty(&network_cpus_map)) { - pr_warning("Ignoring network_cpus='%s'.\n", - str); + pr_warn("Ignoring network_cpus='%s'\n", str); } else { char buf[1024]; cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c8fd94133ecd..4ea2d4e6f1d1 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1485,7 +1485,6 @@ static int axienet_of_probe(struct platform_device *op) if (!ndev) return -ENOMEM; - ether_setup(ndev); platform_set_drvdata(op, ndev); SET_NETDEV_DEV(ndev, &op->dev); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index d5e07def6a59..2f48f790c9b4 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -591,7 +591,7 @@ struct nvsp_message { #define NETVSC_RECEIVE_BUFFER_ID 0xcafe -#define NETVSC_PACKET_SIZE 2048 +#define NETVSC_PACKET_SIZE 4096 #define VRSS_SEND_TAB_SIZE 16 @@ -642,7 +642,7 @@ struct netvsc_device { int ring_size; /* The primary channel callback buffer */ - unsigned char cb_buffer[NETVSC_PACKET_SIZE]; + unsigned char *cb_buffer; /* The sub channel callback buffer */ unsigned char *sub_cb_buf; }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 66979cf7fca6..977984bc238a 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -42,6 +42,12 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) if (!net_device) return NULL; + net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL); + if (!net_device->cb_buffer) { + kfree(net_device); + return NULL; + } + init_waitqueue_head(&net_device->wait_drain); net_device->start_remove = false; net_device->destroy = false; @@ -52,6 +58,12 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device) return net_device; } +static void free_netvsc_device(struct netvsc_device *nvdev) +{ + kfree(nvdev->cb_buffer); + kfree(nvdev); +} + static struct netvsc_device *get_outbound_net_device(struct hv_device *device) { struct netvsc_device *net_device; @@ -551,7 +563,7 @@ int netvsc_device_remove(struct hv_device *device) if (net_device->sub_cb_buf) vfree(net_device->sub_cb_buf); - kfree(net_device); + free_netvsc_device(net_device); return 0; } @@ -1042,10 +1054,8 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) struct net_device *ndev; net_device = alloc_net_device(device); - if (!net_device) { - ret = -ENOMEM; - goto cleanup; - } + if (!net_device) + return -ENOMEM; net_device->ring_size = ring_size; @@ -1093,7 +1103,7 @@ close: vmbus_close(device->channel); cleanup: - kfree(net_device); + free_netvsc_device(net_device); return ret; } diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index a04af9d0f8f9..a2e556168286 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -324,12 +324,8 @@ static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r) seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n", h, (unsigned)rd_get_status(rd), j); if (j > 0) { - seq_printf(seq, " data:"); - if (j > 20) - j = 20; - for (i = 0; i < j; i++) - seq_printf(seq, " %02x", 
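[Annotation] The .owner = THIS_MODULE deletions in the cpmac, cpsw-phy-sel, cpsw, davinci_emac and davinci_mdio hunks above are safe because the platform core now fills the field in at registration time; the registration macro (paraphrased from include/linux/platform_device.h of this era) already threads the module through:

/* paraphrased: every caller passes THIS_MODULE implicitly, so an
 * explicit .owner initializer in the driver struct is redundant.
 */
#define platform_driver_register(drv) \
	__platform_driver_register(drv, THIS_MODULE)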
(unsigned)((unsigned char *)rd->buf)[i]); - seq_printf(seq, "\n"); + seq_printf(seq, " data: %*ph\n", + min_t(unsigned, j, 20), rd->buf); } } for (i = 0; i < r->size; i++) { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 65de0cab8d07..14afa4f24424 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -159,8 +159,6 @@ config MDIO_OCTEON config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" depends on ARCH_SUNXI - select REGULATOR - select REGULATOR_FIXED_VOLTAGE help This driver supports the MDIO interface found in the network interface units of the Allwinner SoC that have an EMAC (A10, @@ -205,6 +203,14 @@ config MDIO_BUS_MUX_MMIOREG Currently, only 8-bit registers are supported. +config MDIO_BCM_UNIMAC + tristate "Broadcom UniMAC MDIO bus controller" + help + This module provides a driver for the Broadcom UniMAC MDIO busses. + This hardware can be found in the Broadcom GENET Ethernet MAC + controllers as well as some Broadcom Ethernet switches such as the + Starfighter 2 switches. + endif # PHYLIB config MICREL_KS8995MA diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 7dc3d5b304cf..eb3b18b5978b 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -34,3 +34,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o +obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index f3230eef41fd..c456559f6e7f 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c @@ -75,7 +75,6 @@ #include <linux/of_device.h> #include <linux/uaccess.h> - MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION("1.0.0-a"); @@ -100,9 +99,11 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #ifndef MDIO_PMA_10GBR_PMD_CTRL #define MDIO_PMA_10GBR_PMD_CTRL 0x0096 #endif + #ifndef MDIO_PMA_10GBR_FEC_CTRL #define MDIO_PMA_10GBR_FEC_CTRL 0x00ab #endif + #ifndef MDIO_AN_XNP #define MDIO_AN_XNP 0x0016 #endif @@ -110,14 +111,23 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #ifndef MDIO_AN_INTMASK #define MDIO_AN_INTMASK 0x8001 #endif + #ifndef MDIO_AN_INT #define MDIO_AN_INT 0x8002 #endif +#ifndef MDIO_AN_KR_CTRL +#define MDIO_AN_KR_CTRL 0x8003 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif +#ifndef MDIO_KR_CTRL_PDETECT +#define MDIO_KR_CTRL_PDETECT 0x01 +#endif + /* SerDes integration register offsets */ #define SIR0_KR_RT_1 0x002c #define SIR0_STATUS 0x0040 @@ -161,7 +171,6 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define SPEED_1000_TXAMP 0xf #define SPEED_1000_WORD 0x1 - /* SerDes RxTx register offsets */ #define RXTX_REG20 0x0050 #define RXTX_REG114 0x01c8 @@ -255,7 +264,6 @@ do { \ XSIR1_IOWRITE((_priv), _reg, reg_val); \ } while (0) - /* Macros for reading or writing SerDes RxTx registers * The ioread macros will get bit fields or full values using the * register definitions formed using the input names @@ -283,7 +291,6 @@ do { \ XRXTX_IOWRITE((_priv), _reg, reg_val); \ } while (0) - enum amd_xgbe_phy_an { AMD_XGBE_AN_READY = 0, AMD_XGBE_AN_START, @@ -331,7 +338,6 @@ struct amd_xgbe_phy_priv { /* Maintain link status for re-starting auto-negotiation */ unsigned int link; - enum amd_xgbe_phy_mode mode; unsigned int speed_set; /* Auto-negotiation state 
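[Annotation] The vlsi_ir hunk above swaps an open-coded hex loop for %*ph, the kernel's printf extension that dumps up to 64 bytes as space-separated hex, with the field width giving the byte count. Minimal usage sketch:

u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };

pr_debug("data: %*ph\n", 4, buf);	/* prints "data: de ad be ef" */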
machine support */ @@ -342,6 +348,7 @@ struct amd_xgbe_phy_priv { enum amd_xgbe_phy_rx kx_state; struct work_struct an_work; struct workqueue_struct *an_workqueue; + unsigned int parallel_detect; }; static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev) @@ -468,8 +475,6 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) amd_xgbe_phy_serdes_complete_ratechange(phydev); - priv->mode = AMD_XGBE_MODE_KR; - return 0; } @@ -518,8 +523,6 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev) amd_xgbe_phy_serdes_complete_ratechange(phydev); - priv->mode = AMD_XGBE_MODE_KX; - return 0; } @@ -568,18 +571,43 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev) amd_xgbe_phy_serdes_complete_ratechange(phydev); - priv->mode = AMD_XGBE_MODE_KX; + return 0; +} + +static int amd_xgbe_phy_cur_mode(struct phy_device *phydev, + enum amd_xgbe_phy_mode *mode) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); + if (ret < 0) + return ret; + + if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR) + *mode = AMD_XGBE_MODE_KR; + else + *mode = AMD_XGBE_MODE_KX; return 0; } +static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev) +{ + enum amd_xgbe_phy_mode mode; + + if (amd_xgbe_phy_cur_mode(phydev, &mode)) + return false; + + return (mode == AMD_XGBE_MODE_KR); +} + static int amd_xgbe_phy_switch_mode(struct phy_device *phydev) { struct amd_xgbe_phy_priv *priv = phydev->priv; int ret; /* If we are in KR switch to KX, and vice-versa */ - if (priv->mode == AMD_XGBE_MODE_KR) { + if (amd_xgbe_phy_in_kr_mode(phydev)) { if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000) ret = amd_xgbe_phy_gmii_mode(phydev); else @@ -591,15 +619,20 @@ static int amd_xgbe_phy_switch_mode(struct phy_device *phydev) return ret; } -static enum amd_xgbe_phy_an amd_xgbe_an_switch_mode(struct phy_device *phydev) +static int amd_xgbe_phy_set_mode(struct phy_device *phydev, + enum amd_xgbe_phy_mode mode) { + enum amd_xgbe_phy_mode cur_mode; int ret; - ret = amd_xgbe_phy_switch_mode(phydev); - if (ret < 0) - return AMD_XGBE_AN_ERROR; + ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode); + if (ret) + return ret; - return AMD_XGBE_AN_START; + if (mode != cur_mode) + ret = amd_xgbe_phy_switch_mode(phydev); + + return ret; } static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev, @@ -610,8 +643,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev, *state = AMD_XGBE_RX_COMPLETE; - /* If we're in KX mode then we're done */ - if (priv->mode == AMD_XGBE_MODE_KX) + /* If we're not in KR mode then we're done */ + if (!amd_xgbe_phy_in_kr_mode(phydev)) return AMD_XGBE_AN_EVENT; /* Enable/Disable FEC */ @@ -669,7 +702,6 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev, static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev, enum amd_xgbe_phy_rx *state) { - struct amd_xgbe_phy_priv *priv = phydev->priv; unsigned int link_support; int ret, ad_reg, lp_reg; @@ -679,9 +711,9 @@ static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev, return AMD_XGBE_AN_ERROR; /* Check for a supported mode, otherwise restart in a different one */ - link_support = (priv->mode == AMD_XGBE_MODE_KR) ? 0x80 : 0x20; + link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 
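[Annotation] The refactor running through this amd-xgbe-phy hunk deletes the cached priv->mode in favour of asking the PCS which mode it is really in, so the answer can never go stale across autonegotiation restarts. The whole query is one clause-45 read, condensed from amd_xgbe_phy_cur_mode() above:

ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
if (ret < 0)
	return ret;
/* 10GBASE-R PCS type selected means KR; anything else means KX */
in_kr = ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR);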
0x80 : 0x20; if (!(ret & link_support)) - return amd_xgbe_an_switch_mode(phydev); + return AMD_XGBE_AN_INCOMPAT_LINK; /* Check Extended Next Page support */ ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE); @@ -722,7 +754,7 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev) int ret; /* Be sure we aren't looping trying to negotiate */ - if (priv->mode == AMD_XGBE_MODE_KR) { + if (amd_xgbe_phy_in_kr_mode(phydev)) { if (priv->kr_state != AMD_XGBE_RX_READY) return AMD_XGBE_AN_NO_LINK; priv->kr_state = AMD_XGBE_RX_BPA; @@ -785,6 +817,13 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev) /* Enable and start auto-negotiation */ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0); + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL); + if (ret < 0) + return AMD_XGBE_AN_ERROR; + + ret |= MDIO_KR_CTRL_PDETECT; + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_KR_CTRL, ret); + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); if (ret < 0) return AMD_XGBE_AN_ERROR; @@ -825,8 +864,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev) enum amd_xgbe_phy_rx *state; int ret; - state = (priv->mode == AMD_XGBE_MODE_KR) ? &priv->kr_state - : &priv->kx_state; + state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state + : &priv->kx_state; switch (*state) { case AMD_XGBE_RX_BPA: @@ -846,7 +885,13 @@ static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev) static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev) { - return amd_xgbe_an_switch_mode(phydev); + int ret; + + ret = amd_xgbe_phy_switch_mode(phydev); + if (ret) + return AMD_XGBE_AN_ERROR; + + return AMD_XGBE_AN_START; } static void amd_xgbe_an_state_machine(struct work_struct *work) @@ -859,6 +904,10 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) int sleep; unsigned int an_supported = 0; + /* Start in KX mode */ + if (amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX)) + priv->an_state = AMD_XGBE_AN_ERROR; + while (1) { mutex_lock(&priv->an_mutex); @@ -866,8 +915,9 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) switch (priv->an_state) { case AMD_XGBE_AN_START: - priv->an_state = amd_xgbe_an_start(phydev); an_supported = 0; + priv->parallel_detect = 0; + priv->an_state = amd_xgbe_an_start(phydev); break; case AMD_XGBE_AN_EVENT: @@ -884,6 +934,7 @@ static void amd_xgbe_an_state_machine(struct work_struct *work) break; case AMD_XGBE_AN_COMPLETE: + priv->parallel_detect = an_supported ? 0 : 1; netdev_info(phydev->attached_dev, "%s successful\n", an_supported ? 
"Auto negotiation" : "Parallel detection"); @@ -1018,7 +1069,6 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev) { struct amd_xgbe_phy_priv *priv = phydev->priv; u32 mmd_mask = phydev->c45_ids.devices_in_package; - int ret; if (phydev->autoneg != AUTONEG_ENABLE) return amd_xgbe_phy_setup_forced(phydev); @@ -1027,11 +1077,6 @@ static int amd_xgbe_phy_config_aneg(struct phy_device *phydev) if (!(mmd_mask & MDIO_DEVS_AN)) return -EINVAL; - /* Get the current speed mode */ - ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); - if (ret < 0) - return ret; - /* Start/Restart the auto-negotiation state machine */ mutex_lock(&priv->an_mutex); priv->an_result = AMD_XGBE_AN_READY; @@ -1121,18 +1166,14 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev) { struct amd_xgbe_phy_priv *priv = phydev->priv; u32 mmd_mask = phydev->c45_ids.devices_in_package; - int ret, mode, ad_ret, lp_ret; + int ret, ad_ret, lp_ret; ret = amd_xgbe_phy_update_link(phydev); if (ret) return ret; - mode = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); - if (mode < 0) - return mode; - mode &= MDIO_PCS_CTRL2_TYPE; - - if (phydev->autoneg == AUTONEG_ENABLE) { + if ((phydev->autoneg == AUTONEG_ENABLE) && + !priv->parallel_detect) { if (!(mmd_mask & MDIO_DEVS_AN)) return -EINVAL; @@ -1163,40 +1204,39 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev) ad_ret &= lp_ret; if (ad_ret & 0x80) { phydev->speed = SPEED_10000; - if (mode != MDIO_PCS_CTRL2_10GBR) { - ret = amd_xgbe_phy_xgmii_mode(phydev); - if (ret < 0) - return ret; - } + ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR); + if (ret) + return ret; } else { - int (*mode_fcn)(struct phy_device *); - - if (priv->speed_set == - AMD_XGBE_PHY_SPEEDSET_1000_10000) { + switch (priv->speed_set) { + case AMD_XGBE_PHY_SPEEDSET_1000_10000: phydev->speed = SPEED_1000; - mode_fcn = amd_xgbe_phy_gmii_mode; - } else { + break; + + case AMD_XGBE_PHY_SPEEDSET_2500_10000: phydev->speed = SPEED_2500; - mode_fcn = amd_xgbe_phy_gmii_2500_mode; + break; } - if (mode == MDIO_PCS_CTRL2_10GBR) { - ret = mode_fcn(phydev); - if (ret < 0) - return ret; - } + ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX); + if (ret) + return ret; } phydev->duplex = DUPLEX_FULL; } else { - if (mode == MDIO_PCS_CTRL2_10GBR) { + if (amd_xgbe_phy_in_kr_mode(phydev)) { phydev->speed = SPEED_10000; } else { - if (priv->speed_set == - AMD_XGBE_PHY_SPEEDSET_1000_10000) + switch (priv->speed_set) { + case AMD_XGBE_PHY_SPEEDSET_1000_10000: phydev->speed = SPEED_1000; - else + break; + + case AMD_XGBE_PHY_SPEEDSET_2500_10000: phydev->speed = SPEED_2500; + break; + } } phydev->duplex = DUPLEX_FULL; phydev->pause = 0; @@ -1329,14 +1369,6 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev) priv->link = 1; - ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2); - if (ret < 0) - goto err_sir1; - if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR) - priv->mode = AMD_XGBE_MODE_KR; - else - priv->mode = AMD_XGBE_MODE_KX; - mutex_init(&priv->an_mutex); INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine); priv->an_workqueue = create_singlethread_workqueue(wq_name); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index fdce1ea28790..daae69950925 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -14,6 +14,7 @@ #include <linux/delay.h> #include <linux/bitops.h> #include <linux/brcmphy.h> +#include <linux/mdio.h> /* Broadcom BCM7xxx internal PHY registers */ #define MII_BCM7XXX_CHANNEL_WIDTH 0x2000 @@ -146,15 +147,79 @@ static int 
bcm7xxx_28nm_afe_config_init(struct phy_device *phydev) return 0; } +static int bcm7xxx_apd_enable(struct phy_device *phydev) +{ + int val; + + /* Enable powering down of the DLL during auto-power down */ + val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3); + if (val < 0) + return val; + + val |= BCM54XX_SHD_SCR3_DLLAPD_DIS; + bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val); + + /* Enable auto-power down */ + val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD); + if (val < 0) + return val; + + val |= BCM54XX_SHD_APD_EN; + return bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val); +} + +static int bcm7xxx_eee_enable(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, + MDIO_MMD_AN, phydev->addr); + if (val < 0) + return val; + + /* Enable general EEE feature at the PHY level */ + val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X; + + phy_write_mmd_indirect(phydev, BRCM_CL45VEN_EEE_CONTROL, + MDIO_MMD_AN, phydev->addr, val); + + /* Advertise supported modes */ + val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, + MDIO_MMD_AN, phydev->addr); + + val |= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); + phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, + MDIO_MMD_AN, phydev->addr, val); + + return 0; +} + static int bcm7xxx_28nm_config_init(struct phy_device *phydev) { - int ret; + u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags); + u8 patch = PHY_BRCM_7XXX_PATCH(phydev->dev_flags); + int ret = 0; + + dev_info(&phydev->dev, "PHY revision: 0x%02x, patch: %d\n", rev, patch); + + switch (rev) { + case 0xa0: + case 0xb0: + ret = bcm7445_config_init(phydev); + break; + default: + ret = bcm7xxx_28nm_afe_config_init(phydev); + break; + } - ret = bcm7445_config_init(phydev); if (ret) return ret; - return bcm7xxx_28nm_afe_config_init(phydev); + ret = bcm7xxx_eee_enable(phydev); + if (ret) + return ret; + + return bcm7xxx_apd_enable(phydev); } static int bcm7xxx_28nm_resume(struct phy_device *phydev) @@ -201,8 +266,8 @@ static int bcm7xxx_config_init(struct phy_device *phydev) phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO); phy_read(phydev, MII_BCM7XXX_AUX_MODE); - /* Workaround only required for 100Mbits/sec */ - if (!(phydev->dev_flags & PHY_BRCM_100MBPS_WAR)) + /* Workaround only required for 100Mbits/sec capable PHYs */ + if (phydev->supported & PHY_GBIT_FEATURES) return 0; /* set shadow mode 2 */ @@ -263,44 +328,28 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev) return 0; } +#define BCM7XXX_28NM_GPHY(_oui, _name) \ +{ \ + .phy_id = (_oui), \ + .phy_id_mask = 0xfffffff0, \ + .name = _name, \ + .features = PHY_GBIT_FEATURES | \ + SUPPORTED_Pause | SUPPORTED_Asym_Pause, \ + .flags = PHY_IS_INTERNAL, \ + .config_init = bcm7xxx_28nm_afe_config_init, \ + .config_aneg = genphy_config_aneg, \ + .read_status = genphy_read_status, \ + .resume = bcm7xxx_28nm_resume, \ + .driver = { .owner = THIS_MODULE }, \ +} + static struct phy_driver bcm7xxx_driver[] = { + BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), + BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), { - .phy_id = PHY_ID_BCM7366, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7366", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_28nm_afe_config_init, - .config_aneg = genphy_config_aneg, - 
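[Annotation] bcm7xxx_28nm_config_init() above dispatches on a chip revision that arrives packed in phydev->dev_flags, set by the Ethernet MAC driver before the PHY attaches. My understanding of the layout follows; treat the exact bit positions as an assumption (they should match the PHY_BRCM_7XXX_* macros this series adds to brcmphy.h):

#define PHY_BRCM_7XXX_REV(flags)	(((flags) >> 8) & 0xff)	/* e.g. 0xa0, 0xb0 */
#define PHY_BRCM_7XXX_PATCH(flags)	((flags) & 0xff)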
.read_status = genphy_read_status, - .resume = bcm7xxx_28nm_resume, - .driver = { .owner = THIS_MODULE }, -}, { - .phy_id = PHY_ID_BCM7439, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7439", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_28nm_afe_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .resume = bcm7xxx_28nm_resume, - .driver = { .owner = THIS_MODULE }, -}, { - .phy_id = PHY_ID_BCM7445, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7445", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_28nm_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .resume = bcm7xxx_28nm_afe_config_init, - .driver = { .owner = THIS_MODULE }, -}, { .phy_id = PHY_BCM_OUI_4, .phy_id_mask = 0xffff0000, .name = "Broadcom BCM7XXX 40nm", @@ -329,6 +378,8 @@ static struct phy_driver bcm7xxx_driver[] = { } }; static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { + { PHY_ID_BCM7250, 0xfffffff0, }, + { PHY_ID_BCM7364, 0xfffffff0, }, { PHY_ID_BCM7366, 0xfffffff0, }, { PHY_ID_BCM7439, 0xfffffff0, }, { PHY_ID_BCM7445, 0xfffffff0, }, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 34088d60da74..854f2c9a7b2b 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -25,132 +25,10 @@ #define BRCM_PHY_REV(phydev) \ ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask)) -/* - * Broadcom LED source encodings. These are used in BCM5461, BCM5481, - * BCM5482, and possibly some others. - */ -#define BCM_LED_SRC_LINKSPD1 0x0 -#define BCM_LED_SRC_LINKSPD2 0x1 -#define BCM_LED_SRC_XMITLED 0x2 -#define BCM_LED_SRC_ACTIVITYLED 0x3 -#define BCM_LED_SRC_FDXLED 0x4 -#define BCM_LED_SRC_SLAVE 0x5 -#define BCM_LED_SRC_INTR 0x6 -#define BCM_LED_SRC_QUALITY 0x7 -#define BCM_LED_SRC_RCVLED 0x8 -#define BCM_LED_SRC_MULTICOLOR1 0xa -#define BCM_LED_SRC_OPENSHORT 0xb -#define BCM_LED_SRC_OFF 0xe /* Tied high */ -#define BCM_LED_SRC_ON 0xf /* Tied low */ - - -/* - * BCM5482: Shadow registers - * Shadow values go into bits [14:10] of register 0x1c to select a shadow - * register to access. - */ -/* 00101: Spare Control Register 3 */ -#define BCM54XX_SHD_SCR3 0x05 -#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 -#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 -#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 - -/* 01010: Auto Power-Down */ -#define BCM54XX_SHD_APD 0x0a -#define BCM54XX_SHD_APD_EN 0x0020 - -#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ - /* LED3 / ~LINKSPD[2] selector */ -#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) - /* LED1 / ~LINKSPD[1] selector */ -#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) -#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */ -#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ -#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ -#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ -#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ -#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ - - -/* - * EXPANSION SHADOW ACCESS REGISTERS. 
(PHY REG 0x15, 0x16, and 0x17) - */ -#define MII_BCM54XX_EXP_AADJ1CH0 0x001f -#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200 -#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100 -#define MII_BCM54XX_EXP_AADJ1CH3 0x601f -#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002 -#define MII_BCM54XX_EXP_EXP08 0x0F08 -#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 -#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 -#define MII_BCM54XX_EXP_EXP75 0x0f75 -#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c -#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 -#define MII_BCM54XX_EXP_EXP96 0x0f96 -#define MII_BCM54XX_EXP_EXP96_MYST 0x0010 -#define MII_BCM54XX_EXP_EXP97 0x0f97 -#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c - -/* - * BCM5482: Secondary SerDes registers - */ -#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ -#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */ -#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */ -#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ -#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ - - -/*****************************************************************************/ -/* Fast Ethernet Transceiver definitions. */ -/*****************************************************************************/ - -#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */ -#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */ -#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */ -#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */ -#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */ -#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */ - -#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */ -#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */ - - -/*** Shadow register definitions ***/ - -#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */ -#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */ - -#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */ -#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003 -#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001 - -#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ -#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ - - MODULE_DESCRIPTION("Broadcom PHY driver"); MODULE_AUTHOR("Maciej W. Rozycki"); MODULE_LICENSE("GPL"); -/* - * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T - * 0x1c shadow registers. 
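[Annotation] The LED, shadow, expansion and SerDes definitions being deleted from broadcom.c here are moved rather than dropped: bcm7xxx.c above now calls bcm54xx_shadow_read()/bcm54xx_shadow_write() for its auto-power-down setup, so the helpers and register names evidently migrate to the shared include/linux/brcmphy.h. Shared usage then looks like:

/* enable auto power-down via the now-shared shadow-register helpers */
val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
if (val >= 0)
	bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD,
			     val | BCM54XX_SHD_APD_EN);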
- */ -static int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow) -{ - phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); - return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD)); -} - -static int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, u16 val) -{ - return phy_write(phydev, MII_BCM54XX_SHD, - MII_BCM54XX_SHD_WRITE | - MII_BCM54XX_SHD_VAL(shadow) | - MII_BCM54XX_SHD_DATA(val)); -} - /* Indirect register access functions for the Expansion Registers */ static int bcm54xx_exp_read(struct phy_device *phydev, u16 regnum) { diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index c301e4cb37ca..2954052706e8 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -721,7 +721,7 @@ static inline u16 exts_chan_to_edata(int ch) } static int decode_evnt(struct dp83640_private *dp83640, - void *data, u16 ests) + void *data, int len, u16 ests) { struct phy_txts *phy_txts; struct ptp_clock_event event; @@ -729,6 +729,16 @@ static int decode_evnt(struct dp83640_private *dp83640, int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK; u16 ext_status = 0; + /* calculate length of the event timestamp status message */ + if (ests & MULT_EVNT) + parsed = (words + 2) * sizeof(u16); + else + parsed = (words + 1) * sizeof(u16); + + /* check if enough data is available */ + if (len < parsed) + return len; + if (ests & MULT_EVNT) { ext_status = *(u16 *) data; data += sizeof(ext_status); @@ -747,10 +757,7 @@ static int decode_evnt(struct dp83640_private *dp83640, dp83640->edata.ns_lo = phy_txts->ns_lo; } - if (ext_status) { - parsed = words + 2; - } else { - parsed = words + 1; + if (!ext_status) { i = ((ests >> EVNT_NUM_SHIFT) & EVNT_NUM_MASK) - EXT_EVENT; ext_status = exts_chan_to_edata(i); } @@ -768,7 +775,7 @@ static int decode_evnt(struct dp83640_private *dp83640, } } - return parsed * sizeof(u16); + return parsed; } static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) @@ -905,9 +912,9 @@ static void decode_status_frame(struct dp83640_private *dp83640, decode_txts(dp83640, phy_txts); size = sizeof(*phy_txts); - } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) { + } else if (PSF_EVNT == type) { - size = decode_evnt(dp83640, ptr, ests); + size = decode_evnt(dp83640, ptr, len, ests); } else { size = 0; @@ -1129,7 +1136,6 @@ static void dp83640_remove(struct phy_device *phydev) struct dp83640_clock *clock; struct list_head *this, *next; struct dp83640_private *tmp, *dp83640 = phydev->priv; - struct sk_buff *skb; if (phydev->addr == BROADCAST_ADDR) return; @@ -1137,11 +1143,8 @@ static void dp83640_remove(struct phy_device *phydev) enable_status_frames(phydev, false); cancel_work_sync(&dp83640->ts_work); - while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) - kfree_skb(skb); - - while ((skb = skb_dequeue(&dp83640->tx_queue)) != NULL) - skb_complete_tx_timestamp(skb, NULL); + skb_queue_purge(&dp83640->rx_queue); + skb_queue_purge(&dp83640->tx_queue); clock = dp83640_clock_get(dp83640->clock); @@ -1398,7 +1401,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, case HWTSTAMP_TX_ONESTEP_SYNC: if (is_sync(skb, type)) { - skb_complete_tx_timestamp(skb, NULL); + kfree_skb(skb); return; } /* fall through */ @@ -1409,7 +1412,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, case HWTSTAMP_TX_OFF: default: - skb_complete_tx_timestamp(skb, NULL); + kfree_skb(skb); break; } } diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c index d60d875cb445..5b19fbbda6d4 100644 --- 
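[Annotation] decode_evnt() previously relied on the caller checking len against a single timestamp, which is too weak for multi-event status frames. The fix above computes the exact size implied by the status word before touching the payload; the guard, condensed:

/* bytes implied by the status word: an optional ext-status word plus
 * 'words' timestamp words, each a u16 */
parsed = (words + ((ests & MULT_EVNT) ? 2 : 1)) * sizeof(u16);
if (len < parsed)
	return len;	/* truncated: consume the rest, decode nothing */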
a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c @@ -124,6 +124,17 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) if (reg_num >= MII_REGS_NUM) return -1; + /* We do not support emulating Clause 45 over Clause 22 register reads + * return an error instead of bogus data. + */ + switch (reg_num) { + case MII_MMD_CTRL: + case MII_MMD_DATA: + return -1; + default: + break; + } + list_for_each_entry(fp, &fmb->phys, node) { if (fp->addr == phy_addr) { /* Issue callback if user registered it. */ diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c new file mode 100644 index 000000000000..5b643e588e8f --- /dev/null +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -0,0 +1,213 @@ +/* + * Broadcom UniMAC MDIO bus controller driver + * + * Copyright (C) 2014, Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/delay.h> + +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_mdio.h> + +#define MDIO_CMD 0x00 +#define MDIO_START_BUSY (1 << 29) +#define MDIO_READ_FAIL (1 << 28) +#define MDIO_RD (2 << 26) +#define MDIO_WR (1 << 26) +#define MDIO_PMD_SHIFT 21 +#define MDIO_PMD_MASK 0x1F +#define MDIO_REG_SHIFT 16 +#define MDIO_REG_MASK 0x1F + +#define MDIO_CFG 0x04 +#define MDIO_C22 (1 << 0) +#define MDIO_C45 0 +#define MDIO_CLK_DIV_SHIFT 4 +#define MDIO_CLK_DIV_MASK 0x3F +#define MDIO_SUPP_PREAMBLE (1 << 12) + +struct unimac_mdio_priv { + struct mii_bus *mii_bus; + void __iomem *base; +}; + +static inline void unimac_mdio_start(struct unimac_mdio_priv *priv) +{ + u32 reg; + + reg = __raw_readl(priv->base + MDIO_CMD); + reg |= MDIO_START_BUSY; + __raw_writel(reg, priv->base + MDIO_CMD); +} + +static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv) +{ + return __raw_readl(priv->base + MDIO_CMD) & MDIO_START_BUSY; +} + +static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct unimac_mdio_priv *priv = bus->priv; + unsigned int timeout = 1000; + u32 cmd; + + /* Prepare the read operation */ + cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT); + __raw_writel(cmd, priv->base + MDIO_CMD); + + /* Start MDIO transaction */ + unimac_mdio_start(priv); + + do { + if (!unimac_mdio_busy(priv)) + break; + + usleep_range(1000, 2000); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + cmd = __raw_readl(priv->base + MDIO_CMD); + if (cmd & MDIO_READ_FAIL) + return -EIO; + + return cmd & 0xffff; +} + +static int unimac_mdio_write(struct mii_bus *bus, int phy_id, + int reg, u16 val) +{ + struct unimac_mdio_priv *priv = bus->priv; + unsigned int timeout = 1000; + u32 cmd; + + /* Prepare the write operation */ + cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | + (reg << MDIO_REG_SHIFT) | (0xffff & val); + __raw_writel(cmd, priv->base + MDIO_CMD); + + unimac_mdio_start(priv); + + do { + if (!unimac_mdio_busy(priv)) + break; + + usleep_range(1000, 2000); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static int unimac_mdio_probe(struct platform_device *pdev) +{ + struct unimac_mdio_priv *priv; + struct device_node *np; + struct mii_bus 
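[Annotation] One caveat in the unimac read/write paths above: 'timeout' is unsigned and post-decremented in the do/while condition, so on expiry it wraps to UINT_MAX and the following !timeout test cannot fire (and a success on the very last pass leaves timeout at 0, reporting a spurious timeout). A wrap-safe sketch of the poll (the helper name is mine):

static int unimac_mdio_poll(struct unimac_mdio_priv *priv)
{
	unsigned int timeout = 1000;

	while (timeout--) {
		if (!unimac_mdio_busy(priv))
			return 0;
		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;	/* reached only on genuine expiry */
}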
*bus; + struct resource *r; + int ret; + + np = pdev->dev.of_node; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + /* Just ioremap, as this MDIO block is usually integrated into an + * Ethernet MAC controller register range + */ + priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!priv->base) { + dev_err(&pdev->dev, "failed to remap register\n"); + return -ENOMEM; + } + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) + return -ENOMEM; + + bus = priv->mii_bus; + bus->priv = priv; + bus->name = "unimac MII bus"; + bus->parent = &pdev->dev; + bus->read = unimac_mdio_read; + bus->write = unimac_mdio_write; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + + bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); + if (!bus->irq) { + ret = -ENOMEM; + goto out_mdio_free; + } + + ret = of_mdiobus_register(bus, np); + if (ret) { + dev_err(&pdev->dev, "MDIO bus registration failed\n"); + goto out_mdio_irq; + } + + platform_set_drvdata(pdev, priv); + + dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base); + + return 0; + +out_mdio_irq: + kfree(bus->irq); +out_mdio_free: + mdiobus_free(bus); + return ret; +} + +static int unimac_mdio_remove(struct platform_device *pdev) +{ + struct unimac_mdio_priv *priv = platform_get_drvdata(pdev); + + mdiobus_unregister(priv->mii_bus); + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); + + return 0; +} + +static struct of_device_id unimac_mdio_ids[] = { + { .compatible = "brcm,genet-mdio-v4", }, + { .compatible = "brcm,genet-mdio-v3", }, + { .compatible = "brcm,genet-mdio-v2", }, + { .compatible = "brcm,genet-mdio-v1", }, + { .compatible = "brcm,unimac-mdio", }, + { /* sentinel */ }, +}; + +static struct platform_driver unimac_mdio_driver = { + .driver = { + .name = "unimac-mdio", + .owner = THIS_MODULE, + .of_match_table = unimac_mdio_ids, + }, + .probe = unimac_mdio_probe, + .remove = unimac_mdio_remove, +}; +module_platform_driver(unimac_mdio_driver); + +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom UniMAC MDIO bus controller"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:unimac-mdio"); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 4eaadcfcb0fe..50051f271b10 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -553,8 +553,14 @@ static ssize_t phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) { struct phy_device *phydev = to_phy_device(dev); + const char *mode = NULL; - return sprintf(buf, "%s\n", phy_modes(phydev->interface)); + if (phy_is_internal(phydev)) + mode = "internal"; + else + mode = phy_modes(phydev->interface); + + return sprintf(buf, "%s\n", mode); } static DEVICE_ATTR_RO(phy_interface); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a854d38c231d..1dfffdc9dfc3 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -955,7 +955,7 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad, * 3) Write reg 13 // MMD Data Command for MMD DEVAD * 3) Read reg 14 // Read MMD data */ -static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad, int addr) { struct phy_driver *phydrv = phydev->drv; @@ -971,6 +971,7 @@ static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, } return value; } +EXPORT_SYMBOL(phy_read_mmd_indirect); 
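[Annotation] phy_read_mmd_indirect(), exported above so bcm7xxx can do its EEE setup, implements the clause-45-over-clause-22 sequence its kerneldoc lists: the MMD control/data register pair at clause-22 addresses 13 and 14. Spelled out with the standard mii.h names (sketch of one read):

phy_write(phydev, MII_MMD_CTRL, devad);		/* 1) select MMD, address mode */
phy_write(phydev, MII_MMD_DATA, regnum);	/* 2) latch the MMD register */
phy_write(phydev, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);	/* 3) data mode */
val = phy_read(phydev, MII_MMD_DATA);		/* 4) fetch the value */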
/** * phy_write_mmd_indirect - writes data to the MMD registers @@ -988,7 +989,7 @@ static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, * 3) Write reg 13 // MMD Data Command for MMD DEVAD * 3) Write reg 14 // Write MMD data */ -static void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, +void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, int devad, int addr, u32 data) { struct phy_driver *phydrv = phydev->drv; @@ -1002,6 +1003,7 @@ static void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data); } } +EXPORT_SYMBOL(phy_write_mmd_indirect); /** * phy_init_eee - init and check the EEE feature @@ -1017,12 +1019,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) { /* According to 802.3az,the EEE is supported only in full duplex-mode. * Also EEE feature is active when core is operating with MII, GMII - * or RGMII. + * or RGMII. Internal PHYs are also allowed to proceed and should + * return an error if they do not support EEE. */ if ((phydev->duplex == DUPLEX_FULL) && ((phydev->interface == PHY_INTERFACE_MODE_MII) || (phydev->interface == PHY_INTERFACE_MODE_GMII) || - (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { + (phydev->interface == PHY_INTERFACE_MODE_RGMII) || + phy_is_internal(phydev))) { int eee_lp, eee_cap, eee_adv; u32 lp, cap, adv; int status; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ca5ec3e18d36..3fc91e89f5a5 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -230,13 +230,13 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, for (i = 1; i < num_ids && c45_ids->devices_in_package == 0; i++) { - reg_addr = MII_ADDR_C45 | i << 16 | 6; + reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; c45_ids->devices_in_package = (phy_reg & 0xffff) << 16; - reg_addr = MII_ADDR_C45 | i << 16 | 5; + reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS1; phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c index ae7cd7f3656d..92578d72e4ee 100644 --- a/drivers/net/sungem_phy.c +++ b/drivers/net/sungem_phy.c @@ -47,22 +47,22 @@ static const int phy_BCM5400_link_table[8][3] = { { 1, 0, 1 }, /* 1000BT */ }; -static inline int __phy_read(struct mii_phy* phy, int id, int reg) +static inline int __sungem_phy_read(struct mii_phy* phy, int id, int reg) { return phy->mdio_read(phy->dev, id, reg); } -static inline void __phy_write(struct mii_phy* phy, int id, int reg, int val) +static inline void __sungem_phy_write(struct mii_phy* phy, int id, int reg, int val) { phy->mdio_write(phy->dev, id, reg, val); } -static inline int phy_read(struct mii_phy* phy, int reg) +static inline int sungem_phy_read(struct mii_phy* phy, int reg) { return phy->mdio_read(phy->dev, phy->mii_id, reg); } -static inline void phy_write(struct mii_phy* phy, int reg, int val) +static inline void sungem_phy_write(struct mii_phy* phy, int reg, int val) { phy->mdio_write(phy->dev, phy->mii_id, reg, val); } @@ -72,21 +72,21 @@ static int reset_one_mii_phy(struct mii_phy* phy, int phy_id) u16 val; int limit = 10000; - val = __phy_read(phy, phy_id, MII_BMCR); + val = __sungem_phy_read(phy, phy_id, MII_BMCR); val &= ~(BMCR_ISOLATE | BMCR_PDOWN); val |= BMCR_RESET; - __phy_write(phy, phy_id, MII_BMCR, val); + __sungem_phy_write(phy, phy_id, MII_BMCR, val); udelay(100); 
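/* Aside on the MDIO_DEVS2/MDIO_DEVS1 change in get_phy_c45_ids() above:
 * a clause-45 register address packs the MMD device address and the
 * register number into a single value flagged with MII_ADDR_C45, so the
 * named constants simply replace the bare 6 and 5. Illustrative
 * expansion, not part of the patch:
 *
 *	reg_addr = MII_ADDR_C45 | (devad << 16) | regnum;
 *	// MDIO_DEVS1 == 5, MDIO_DEVS2 == 6: the "devices in package" registers
 */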
while (--limit) { - val = __phy_read(phy, phy_id, MII_BMCR); + val = __sungem_phy_read(phy, phy_id, MII_BMCR); if ((val & BMCR_RESET) == 0) break; udelay(10); } if ((val & BMCR_ISOLATE) && limit > 0) - __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); + __sungem_phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); return limit <= 0; } @@ -95,19 +95,19 @@ static int bcm5201_init(struct mii_phy* phy) { u16 data; - data = phy_read(phy, MII_BCM5201_MULTIPHY); + data = sungem_phy_read(phy, MII_BCM5201_MULTIPHY); data &= ~MII_BCM5201_MULTIPHY_SUPERISOLATE; - phy_write(phy, MII_BCM5201_MULTIPHY, data); + sungem_phy_write(phy, MII_BCM5201_MULTIPHY, data); - phy_write(phy, MII_BCM5201_INTERRUPT, 0); + sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0); return 0; } static int bcm5201_suspend(struct mii_phy* phy) { - phy_write(phy, MII_BCM5201_INTERRUPT, 0); - phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE); + sungem_phy_write(phy, MII_BCM5201_INTERRUPT, 0); + sungem_phy_write(phy, MII_BCM5201_MULTIPHY, MII_BCM5201_MULTIPHY_SUPERISOLATE); return 0; } @@ -116,20 +116,20 @@ static int bcm5221_init(struct mii_phy* phy) { u16 data; - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); - phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, data | MII_BCM5221_SHDOW_AUX_STAT2_APD); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); - phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR); - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); return 0; @@ -139,12 +139,12 @@ static int bcm5221_suspend(struct mii_phy* phy) { u16 data; - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); - phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE); return 0; @@ -154,20 +154,20 @@ static int bcm5241_init(struct mii_phy* phy) { u16 data; - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); - phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_STAT2); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_STAT2, data | MII_BCM5221_SHDOW_AUX_STAT2_APD); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); - phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data & ~MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = 
sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data & ~MII_BCM5221_TEST_ENABLE_SHADOWS); return 0; @@ -177,12 +177,12 @@ static int bcm5241_suspend(struct mii_phy* phy) { u16 data; - data = phy_read(phy, MII_BCM5221_TEST); - phy_write(phy, MII_BCM5221_TEST, + data = sungem_phy_read(phy, MII_BCM5221_TEST); + sungem_phy_write(phy, MII_BCM5221_TEST, data | MII_BCM5221_TEST_ENABLE_SHADOWS); - data = phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); - phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, + data = sungem_phy_read(phy, MII_BCM5221_SHDOW_AUX_MODE4); + sungem_phy_write(phy, MII_BCM5221_SHDOW_AUX_MODE4, data | MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR); return 0; @@ -193,26 +193,26 @@ static int bcm5400_init(struct mii_phy* phy) u16 data; /* Configure for gigabit full duplex */ - data = phy_read(phy, MII_BCM5400_AUXCONTROL); + data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL); data |= MII_BCM5400_AUXCONTROL_PWR10BASET; - phy_write(phy, MII_BCM5400_AUXCONTROL, data); + sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data); - data = phy_read(phy, MII_BCM5400_GB_CONTROL); + data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_BCM5400_GB_CONTROL, data); + sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(100); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); - data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); + data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); data |= MII_BCM5201_MULTIPHY_SERIALMODE; - __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); + __sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); - data = phy_read(phy, MII_BCM5400_AUXCONTROL); + data = sungem_phy_read(phy, MII_BCM5400_AUXCONTROL); data &= ~MII_BCM5400_AUXCONTROL_PWR10BASET; - phy_write(phy, MII_BCM5400_AUXCONTROL, data); + sungem_phy_write(phy, MII_BCM5400_AUXCONTROL, data); return 0; } @@ -220,7 +220,7 @@ static int bcm5400_init(struct mii_phy* phy) static int bcm5400_suspend(struct mii_phy* phy) { #if 0 /* Commented out in Darwin... someone has those dawn docs ? */ - phy_write(phy, MII_BMCR, BMCR_PDOWN); + sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN); #endif return 0; } @@ -230,7 +230,7 @@ static int bcm5401_init(struct mii_phy* phy) u16 data; int rev; - rev = phy_read(phy, MII_PHYSID2) & 0x000f; + rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f; if (rev == 0 || rev == 3) { /* Some revisions of 5401 appear to need this * initialisation sequence to disable, according @@ -243,32 +243,32 @@ static int bcm5401_init(struct mii_phy* phy) * Note: This should (and does) match tg3_init_5401phy_dsp * in the tg3.c driver. 
-DaveM */ - phy_write(phy, 0x18, 0x0c20); - phy_write(phy, 0x17, 0x0012); - phy_write(phy, 0x15, 0x1804); - phy_write(phy, 0x17, 0x0013); - phy_write(phy, 0x15, 0x1204); - phy_write(phy, 0x17, 0x8006); - phy_write(phy, 0x15, 0x0132); - phy_write(phy, 0x17, 0x8006); - phy_write(phy, 0x15, 0x0232); - phy_write(phy, 0x17, 0x201f); - phy_write(phy, 0x15, 0x0a20); + sungem_phy_write(phy, 0x18, 0x0c20); + sungem_phy_write(phy, 0x17, 0x0012); + sungem_phy_write(phy, 0x15, 0x1804); + sungem_phy_write(phy, 0x17, 0x0013); + sungem_phy_write(phy, 0x15, 0x1204); + sungem_phy_write(phy, 0x17, 0x8006); + sungem_phy_write(phy, 0x15, 0x0132); + sungem_phy_write(phy, 0x17, 0x8006); + sungem_phy_write(phy, 0x15, 0x0232); + sungem_phy_write(phy, 0x17, 0x201f); + sungem_phy_write(phy, 0x15, 0x0a20); } /* Configure for gigabit full duplex */ - data = phy_read(phy, MII_BCM5400_GB_CONTROL); + data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_BCM5400_GB_CONTROL, data); + sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(10); /* Reset and configure cascaded 10/100 PHY */ (void)reset_one_mii_phy(phy, 0x1f); - data = __phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); + data = __sungem_phy_read(phy, 0x1f, MII_BCM5201_MULTIPHY); data |= MII_BCM5201_MULTIPHY_SERIALMODE; - __phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); + __sungem_phy_write(phy, 0x1f, MII_BCM5201_MULTIPHY, data); return 0; } @@ -276,7 +276,7 @@ static int bcm5401_init(struct mii_phy* phy) static int bcm5401_suspend(struct mii_phy* phy) { #if 0 /* Commented out in Darwin... someone has those dawn docs ? */ - phy_write(phy, MII_BMCR, BMCR_PDOWN); + sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN); #endif return 0; } @@ -288,19 +288,19 @@ static int bcm5411_init(struct mii_phy* phy) /* Here's some more Apple black magic to setup * some voltage stuffs. 
*/ - phy_write(phy, 0x1c, 0x8c23); - phy_write(phy, 0x1c, 0x8ca3); - phy_write(phy, 0x1c, 0x8c23); + sungem_phy_write(phy, 0x1c, 0x8c23); + sungem_phy_write(phy, 0x1c, 0x8ca3); + sungem_phy_write(phy, 0x1c, 0x8c23); /* Here, Apple seems to want to reset it, do * it as well */ - phy_write(phy, MII_BMCR, BMCR_RESET); - phy_write(phy, MII_BMCR, 0x1340); + sungem_phy_write(phy, MII_BMCR, BMCR_RESET); + sungem_phy_write(phy, MII_BMCR, 0x1340); - data = phy_read(phy, MII_BCM5400_GB_CONTROL); + data = sungem_phy_read(phy, MII_BCM5400_GB_CONTROL); data |= MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_BCM5400_GB_CONTROL, data); + sungem_phy_write(phy, MII_BCM5400_GB_CONTROL, data); udelay(10); @@ -321,7 +321,7 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) phy->advertising = advertise; /* Setup standard advertise */ - adv = phy_read(phy, MII_ADVERTISE); + adv = sungem_phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; @@ -331,12 +331,12 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) adv |= ADVERTISE_100HALF; if (advertise & ADVERTISED_100baseT_Full) adv |= ADVERTISE_100FULL; - phy_write(phy, MII_ADVERTISE, adv); + sungem_phy_write(phy, MII_ADVERTISE, adv); /* Start/Restart aneg */ - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -350,11 +350,11 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) phy->duplex = fd; phy->pause = 0; - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_ANENABLE); /* First reset the PHY */ - phy_write(phy, MII_BMCR, ctl | BMCR_RESET); + sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch(speed) { @@ -369,7 +369,7 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) } if (fd == DUPLEX_FULL) ctl |= BMCR_FULLDPLX; - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -378,8 +378,8 @@ static int genmii_poll_link(struct mii_phy *phy) { u16 status; - (void)phy_read(phy, MII_BMSR); - status = phy_read(phy, MII_BMSR); + (void)sungem_phy_read(phy, MII_BMSR); + status = sungem_phy_read(phy, MII_BMSR); if ((status & BMSR_LSTATUS) == 0) return 0; if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) @@ -392,7 +392,7 @@ static int genmii_read_link(struct mii_phy *phy) u16 lpa; if (phy->autoneg) { - lpa = phy_read(phy, MII_LPA); + lpa = sungem_phy_read(phy, MII_LPA); if (lpa & (LPA_10FULL | LPA_100FULL)) phy->duplex = DUPLEX_FULL; @@ -413,7 +413,7 @@ static int genmii_read_link(struct mii_phy *phy) static int generic_suspend(struct mii_phy* phy) { - phy_write(phy, MII_BMCR, BMCR_PDOWN); + sungem_phy_write(phy, MII_BMCR, BMCR_PDOWN); return 0; } @@ -423,27 +423,27 @@ static int bcm5421_init(struct mii_phy* phy) u16 data; unsigned int id; - id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); + id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2)); /* Revision 0 of 5421 needs some fixups */ if (id == 0x002060e0) { /* This is borrowed from MacOS */ - phy_write(phy, 0x18, 0x1007); - data = phy_read(phy, 0x18); - phy_write(phy, 0x18, data | 0x0400); - phy_write(phy, 0x18, 0x0007); - data = phy_read(phy, 0x18); - phy_write(phy, 0x18, data | 0x0800); - phy_write(phy, 0x17, 0x000a); - 
data = phy_read(phy, 0x15); - phy_write(phy, 0x15, data | 0x0200); + sungem_phy_write(phy, 0x18, 0x1007); + data = sungem_phy_read(phy, 0x18); + sungem_phy_write(phy, 0x18, data | 0x0400); + sungem_phy_write(phy, 0x18, 0x0007); + data = sungem_phy_read(phy, 0x18); + sungem_phy_write(phy, 0x18, data | 0x0800); + sungem_phy_write(phy, 0x17, 0x000a); + data = sungem_phy_read(phy, 0x15); + sungem_phy_write(phy, 0x15, data | 0x0200); } /* Pick up some init code from OF for K2 version */ if ((id & 0xfffffff0) == 0x002062e0) { - phy_write(phy, 4, 0x01e1); - phy_write(phy, 9, 0x0300); + sungem_phy_write(phy, 4, 0x01e1); + sungem_phy_write(phy, 9, 0x0300); } /* Check if we can enable automatic low power */ @@ -455,9 +455,9 @@ static int bcm5421_init(struct mii_phy* phy) can_low_power = 0; if (can_low_power) { /* Enable automatic low-power */ - phy_write(phy, 0x1c, 0x9002); - phy_write(phy, 0x1c, 0xa821); - phy_write(phy, 0x1c, 0x941d); + sungem_phy_write(phy, 0x1c, 0x9002); + sungem_phy_write(phy, 0x1c, 0xa821); + sungem_phy_write(phy, 0x1c, 0x941d); } } #endif /* CONFIG_PPC_PMAC */ @@ -476,7 +476,7 @@ static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) phy->advertising = advertise; /* Setup standard advertise */ - adv = phy_read(phy, MII_ADVERTISE); + adv = sungem_phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; @@ -490,21 +490,21 @@ static int bcm54xx_setup_aneg(struct mii_phy *phy, u32 advertise) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; - phy_write(phy, MII_ADVERTISE, adv); + sungem_phy_write(phy, MII_ADVERTISE, adv); /* Setup 1000BT advertise */ - adv = phy_read(phy, MII_1000BASETCONTROL); + adv = sungem_phy_read(phy, MII_1000BASETCONTROL); adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP|MII_1000BASETCONTROL_HALFDUPLEXCAP); if (advertise & SUPPORTED_1000baseT_Half) adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; if (advertise & SUPPORTED_1000baseT_Full) adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_1000BASETCONTROL, adv); + sungem_phy_write(phy, MII_1000BASETCONTROL, adv); /* Start/Restart aneg */ - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -518,11 +518,11 @@ static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd) phy->duplex = fd; phy->pause = 0; - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); /* First reset the PHY */ - phy_write(phy, MII_BMCR, ctl | BMCR_RESET); + sungem_phy_write(phy, MII_BMCR, ctl | BMCR_RESET); /* Select speed & duplex */ switch(speed) { @@ -539,7 +539,7 @@ static int bcm54xx_setup_forced(struct mii_phy *phy, int speed, int fd) // XXX Should we set the sungem to GII now on 1000BT ? - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -550,7 +550,7 @@ static int bcm54xx_read_link(struct mii_phy *phy) u16 val; if (phy->autoneg) { - val = phy_read(phy, MII_BCM5400_AUXSTATUS); + val = sungem_phy_read(phy, MII_BCM5400_AUXSTATUS); link_mode = ((val & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) >> MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT); phy->duplex = phy_BCM5400_link_table[link_mode][0] ? @@ -559,7 +559,7 @@ static int bcm54xx_read_link(struct mii_phy *phy) SPEED_1000 : (phy_BCM5400_link_table[link_mode][1] ? 
SPEED_100 : SPEED_10); - val = phy_read(phy, MII_LPA); + val = sungem_phy_read(phy, MII_LPA); phy->pause = (phy->duplex == DUPLEX_FULL) && ((val & LPA_PAUSE) != 0); } @@ -575,19 +575,19 @@ static int marvell88e1111_init(struct mii_phy* phy) u16 rev; /* magic init sequence for rev 0 */ - rev = phy_read(phy, MII_PHYSID2) & 0x000f; + rev = sungem_phy_read(phy, MII_PHYSID2) & 0x000f; if (rev == 0) { - phy_write(phy, 0x1d, 0x000a); - phy_write(phy, 0x1e, 0x0821); + sungem_phy_write(phy, 0x1d, 0x000a); + sungem_phy_write(phy, 0x1e, 0x0821); - phy_write(phy, 0x1d, 0x0006); - phy_write(phy, 0x1e, 0x8600); + sungem_phy_write(phy, 0x1d, 0x0006); + sungem_phy_write(phy, 0x1e, 0x8600); - phy_write(phy, 0x1d, 0x000b); - phy_write(phy, 0x1e, 0x0100); + sungem_phy_write(phy, 0x1d, 0x000b); + sungem_phy_write(phy, 0x1e, 0x0100); - phy_write(phy, 0x1d, 0x0004); - phy_write(phy, 0x1e, 0x4850); + sungem_phy_write(phy, 0x1d, 0x0004); + sungem_phy_write(phy, 0x1e, 0x4850); } return 0; } @@ -600,8 +600,8 @@ static int bcm5421_poll_link(struct mii_phy* phy) int mode; /* find out in what mode we are */ - phy_write(phy, MII_NCONFIG, 0x1000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x1000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5421_MODE_MASK) >> 5; @@ -609,8 +609,8 @@ static int bcm5421_poll_link(struct mii_phy* phy) return genmii_poll_link(phy); /* try to find out whether we have a link */ - phy_write(phy, MII_NCONFIG, 0x2000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x2000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); if (phy_reg & 0x0020) return 0; @@ -624,8 +624,8 @@ static int bcm5421_read_link(struct mii_phy* phy) int mode; /* find out in what mode we are */ - phy_write(phy, MII_NCONFIG, 0x1000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x1000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5421_MODE_MASK ) >> 5; @@ -635,8 +635,8 @@ static int bcm5421_read_link(struct mii_phy* phy) phy->speed = SPEED_1000; /* find out whether we are running half- or full duplex */ - phy_write(phy, MII_NCONFIG, 0x2000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x2000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); if ( (phy_reg & 0x0080) >> 7) phy->duplex |= DUPLEX_HALF; @@ -649,14 +649,14 @@ static int bcm5421_read_link(struct mii_phy* phy) static int bcm5421_enable_fiber(struct mii_phy* phy, int autoneg) { /* enable fiber mode */ - phy_write(phy, MII_NCONFIG, 0x9020); + sungem_phy_write(phy, MII_NCONFIG, 0x9020); /* LEDs active in both modes, autosense prio = fiber */ - phy_write(phy, MII_NCONFIG, 0x945f); + sungem_phy_write(phy, MII_NCONFIG, 0x945f); if (!autoneg) { /* switch off fibre autoneg */ - phy_write(phy, MII_NCONFIG, 0xfc01); - phy_write(phy, 0x0b, 0x0004); + sungem_phy_write(phy, MII_NCONFIG, 0xfc01); + sungem_phy_write(phy, 0x0b, 0x0004); } phy->autoneg = autoneg; @@ -673,8 +673,8 @@ static int bcm5461_poll_link(struct mii_phy* phy) int mode; /* find out in what mode we are */ - phy_write(phy, MII_NCONFIG, 0x7c00); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x7c00); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5461_MODE_MASK ) >> 1; @@ -682,8 +682,8 @@ static int bcm5461_poll_link(struct mii_phy* phy) return genmii_poll_link(phy); /* find out whether we have a link */ - phy_write(phy, MII_NCONFIG, 0x7000); - phy_reg = phy_read(phy, MII_NCONFIG); + 
sungem_phy_write(phy, MII_NCONFIG, 0x7000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); if (phy_reg & BCM5461_FIBER_LINK) return 1; @@ -699,8 +699,8 @@ static int bcm5461_read_link(struct mii_phy* phy) int mode; /* find out in what mode we are */ - phy_write(phy, MII_NCONFIG, 0x7c00); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x7c00); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); mode = (phy_reg & BCM5461_MODE_MASK ) >> 1; @@ -711,8 +711,8 @@ static int bcm5461_read_link(struct mii_phy* phy) phy->speed = SPEED_1000; /* find out whether we are running half- or full duplex */ - phy_write(phy, MII_NCONFIG, 0x7000); - phy_reg = phy_read(phy, MII_NCONFIG); + sungem_phy_write(phy, MII_NCONFIG, 0x7000); + phy_reg = sungem_phy_read(phy, MII_NCONFIG); if (phy_reg & BCM5461_FIBER_DUPLEX) phy->duplex |= DUPLEX_FULL; @@ -725,15 +725,15 @@ static int bcm5461_read_link(struct mii_phy* phy) static int bcm5461_enable_fiber(struct mii_phy* phy, int autoneg) { /* select fiber mode, enable 1000 base-X registers */ - phy_write(phy, MII_NCONFIG, 0xfc0b); + sungem_phy_write(phy, MII_NCONFIG, 0xfc0b); if (autoneg) { /* enable fiber with no autonegotiation */ - phy_write(phy, MII_ADVERTISE, 0x01e0); - phy_write(phy, MII_BMCR, 0x1140); + sungem_phy_write(phy, MII_ADVERTISE, 0x01e0); + sungem_phy_write(phy, MII_BMCR, 0x1140); } else { /* enable fiber with autonegotiation */ - phy_write(phy, MII_BMCR, 0x0140); + sungem_phy_write(phy, MII_BMCR, 0x0140); } phy->autoneg = autoneg; @@ -752,7 +752,7 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) phy->advertising = advertise; /* Setup standard advertise */ - adv = phy_read(phy, MII_ADVERTISE); + adv = sungem_phy_read(phy, MII_ADVERTISE); adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (advertise & ADVERTISED_10baseT_Half) adv |= ADVERTISE_10HALF; @@ -766,7 +766,7 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) adv |= ADVERTISE_PAUSE_CAP; if (advertise & ADVERTISED_Asym_Pause) adv |= ADVERTISE_PAUSE_ASYM; - phy_write(phy, MII_ADVERTISE, adv); + sungem_phy_write(phy, MII_ADVERTISE, adv); /* Setup 1000BT advertise & enable crossover detect * XXX How do we advertise 1000BT ? Darwin source is @@ -774,7 +774,7 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) * write to control... Someone has specs for those * beasts ? */ - adv = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); + adv = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); adv |= MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX; adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | MII_1000BASETCONTROL_HALFDUPLEXCAP); @@ -782,12 +782,12 @@ static int marvell_setup_aneg(struct mii_phy *phy, u32 advertise) adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; if (advertise & SUPPORTED_1000baseT_Full) adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; - phy_write(phy, MII_1000BASETCONTROL, adv); + sungem_phy_write(phy, MII_1000BASETCONTROL, adv); /* Start/Restart aneg */ - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -801,7 +801,7 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) phy->duplex = fd; phy->pause = 0; - ctl = phy_read(phy, MII_BMCR); + ctl = sungem_phy_read(phy, MII_BMCR); ctl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPD2|BMCR_ANENABLE); ctl |= BMCR_RESET; @@ -824,7 +824,7 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) /* Disable crossover. 
Again, the way Apple does it is strange, * though I don't assume they are wrong ;) */ - ctl2 = phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); + ctl2 = sungem_phy_read(phy, MII_M1011_PHY_SPEC_CONTROL); ctl2 &= ~(MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX | MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX | MII_1000BASETCONTROL_FULLDUPLEXCAP | @@ -833,11 +833,11 @@ static int marvell_setup_forced(struct mii_phy *phy, int speed, int fd) ctl2 |= (fd == DUPLEX_FULL) ? MII_1000BASETCONTROL_FULLDUPLEXCAP : MII_1000BASETCONTROL_HALFDUPLEXCAP; - phy_write(phy, MII_1000BASETCONTROL, ctl2); + sungem_phy_write(phy, MII_1000BASETCONTROL, ctl2); // XXX Should we set the sungem to GII now on 1000BT ? - phy_write(phy, MII_BMCR, ctl); + sungem_phy_write(phy, MII_BMCR, ctl); return 0; } @@ -847,7 +847,7 @@ static int marvell_read_link(struct mii_phy *phy) u16 status, pmask; if (phy->autoneg) { - status = phy_read(phy, MII_M1011_PHY_SPEC_STATUS); + status = sungem_phy_read(phy, MII_M1011_PHY_SPEC_STATUS); if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0) return -EAGAIN; if (status & MII_M1011_PHY_SPEC_STATUS_1000) @@ -1174,7 +1174,7 @@ int sungem_phy_probe(struct mii_phy *phy, int mii_id) goto fail; /* Read ID and find matching entry */ - id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)); + id = (sungem_phy_read(phy, MII_PHYSID1) << 16 | sungem_phy_read(phy, MII_PHYSID2)); printk(KERN_DEBUG KBUILD_MODNAME ": " "PHY ID: %x, addr: %x\n", id, mii_id); for (i=0; (def = mii_phy_table[i]) != NULL; i++) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index ef10302ec936..2277c3679a51 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1003,7 +1003,6 @@ static int team_port_enter(struct team *team, struct team_port *port) int err = 0; dev_hold(team->dev); - port->dev->priv_flags |= IFF_TEAM_PORT; if (team->ops.port_enter) { err = team->ops.port_enter(team, port); if (err) { @@ -1016,7 +1015,6 @@ static int team_port_enter(struct team *team, struct team_port *port) return 0; err_port_enter: - port->dev->priv_flags &= ~IFF_TEAM_PORT; dev_put(team->dev); return err; @@ -1026,7 +1024,6 @@ static void team_port_leave(struct team *team, struct team_port *port) { if (team->ops.port_leave) team->ops.port_leave(team, port); - port->dev->priv_flags &= ~IFF_TEAM_PORT; dev_put(team->dev); } @@ -1075,6 +1072,25 @@ static void team_port_disable_netpoll(struct team_port *port) } #endif +static int team_upper_dev_link(struct net_device *dev, + struct net_device *port_dev) +{ + int err; + + err = netdev_master_upper_dev_link(port_dev, dev); + if (err) + return err; + port_dev->priv_flags |= IFF_TEAM_PORT; + return 0; +} + +static void team_upper_dev_unlink(struct net_device *dev, + struct net_device *port_dev) +{ + netdev_upper_dev_unlink(port_dev, dev); + port_dev->priv_flags &= ~IFF_TEAM_PORT; +} + static void __team_port_change_port_added(struct team_port *port, bool linkup); static int team_dev_type_check_change(struct net_device *dev, struct net_device *port_dev); @@ -1161,13 +1177,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_enable_netpoll; } - err = netdev_master_upper_dev_link(port_dev, dev); - if (err) { - netdev_err(dev, "Device %s failed to set upper link\n", - portname); - goto err_set_upper_link; - } - err = netdev_rx_handler_register(port_dev, team_handle_frame, port); if (err) { @@ -1176,6 +1185,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_handler_register; } + err = team_upper_dev_link(dev, 
port_dev); + if (err) { + netdev_err(dev, "Device %s failed to set upper link\n", + portname); + goto err_set_upper_link; + } + err = __team_option_inst_add_port(team, port); if (err) { netdev_err(dev, "Device %s failed to add per-port options\n", @@ -1195,12 +1211,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev) return 0; err_option_port_add: + team_upper_dev_unlink(dev, port_dev); + +err_set_upper_link: netdev_rx_handler_unregister(port_dev); err_handler_register: - netdev_upper_dev_unlink(port_dev, dev); - -err_set_upper_link: team_port_disable_netpoll(port); err_enable_netpoll: @@ -1239,8 +1255,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev) team_port_disable(team, port); list_del_rcu(&port->list); + team_upper_dev_unlink(dev, port_dev); netdev_rx_handler_unregister(port_dev); - netdev_upper_dev_unlink(port_dev, dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); dev_uc_unsync(port_dev, dev); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 74760e8143e3..3d5c39a1e46f 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -424,7 +424,7 @@ enum rtl_register_content { FULL_DUP = 0x01, }; -#define RTL8152_MAX_TX 10 +#define RTL8152_MAX_TX 4 #define RTL8152_MAX_RX 10 #define INTBUFSIZE 2 #define CRC_SIZE 4 @@ -506,6 +506,7 @@ struct rx_desc { #define IPF (1 << 23) /* IP checksum fail */ #define UDPF (1 << 22) /* UDP checksum fail */ #define TCPF (1 << 21) /* TCP checksum fail */ +#define RX_VLAN_TAG (1 << 16) __le32 opts4; __le32 opts5; @@ -531,6 +532,7 @@ struct tx_desc { #define MSS_MAX 0x7ffU #define TCPHO_SHIFT 17 #define TCPHO_MAX 0x7ffU +#define TX_VLAN_TAG (1 << 16) }; struct r8152; @@ -607,9 +609,9 @@ enum tx_csum_stat { * The RTL chips use a 64 element hash table based on the Ethernet CRC. 
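/* Restating the multicast hash used later in this file (illustration
 * only): the top six bits of the Ethernet CRC select one of 64 filter
 * bits, split across two 32-bit words:
 *
 *	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;	// 0..63
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */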
*/ static const int multicast_filter_limit = 32; -static unsigned int rx_buf_sz = 16384; +static unsigned int agg_buf_sz = 16384; -#define RTL_LIMITED_TSO_SIZE (rx_buf_sz - sizeof(struct tx_desc) - \ +#define RTL_LIMITED_TSO_SIZE (agg_buf_sz - sizeof(struct tx_desc) - \ VLAN_ETH_HLEN - VLAN_HLEN) static @@ -623,8 +625,8 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) return -ENOMEM; ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), - RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, - value, index, tmp, size, 500); + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, + value, index, tmp, size, 500); memcpy(data, tmp, size); kfree(tmp); @@ -643,8 +645,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) return -ENOMEM; ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0), - RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, - value, index, tmp, size, 500); + RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, + value, index, tmp, size, 500); kfree(tmp); @@ -652,7 +654,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) } static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, - void *data, u16 type) + void *data, u16 type) { u16 limit = 64; int ret = 0; @@ -692,7 +694,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size, } static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, - u16 size, void *data, u16 type) + u16 size, void *data, u16 type) { int ret; u16 byteen_start, byteen_end, byen; @@ -726,8 +728,8 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, while (size) { if (size > limit) { ret = set_registers(tp, index, - type | BYTE_EN_DWORD, - limit, data); + type | BYTE_EN_DWORD, + limit, data); if (ret < 0) goto error1; @@ -736,8 +738,8 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen, size -= limit; } else { ret = set_registers(tp, index, - type | BYTE_EN_DWORD, - size, data); + type | BYTE_EN_DWORD, + size, data); if (ret < 0) goto error1; @@ -972,36 +974,8 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val) usb_autopm_put_interface(tp->intf); } -static -int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags); - -static inline void set_ethernet_addr(struct r8152 *tp) -{ - struct net_device *dev = tp->netdev; - int ret; - u8 node_id[8] = {0}; - - if (tp->version == RTL_VER_01) - ret = pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id); - else - ret = pla_ocp_read(tp, PLA_BACKUP, sizeof(node_id), node_id); - - if (ret < 0) { - netif_notice(tp, probe, dev, "inet addr fail\n"); - } else { - if (tp->version != RTL_VER_01) { - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, - CRWECR_CONFIG); - pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, - sizeof(node_id), node_id); - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, - CRWECR_NORAML); - } - - memcpy(dev->dev_addr, node_id, dev->addr_len); - memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - } -} +static int +r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags); static int rtl8152_set_mac_address(struct net_device *netdev, void *p) { @@ -1020,6 +994,37 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p) return 0; } +static int set_ethernet_addr(struct r8152 *tp) +{ + struct net_device *dev = tp->netdev; + struct sockaddr sa; + int ret; + + if (tp->version == RTL_VER_01) + ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data); + else + ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); + + if 
(ret < 0) { + netif_err(tp, probe, dev, "Get ether addr fail\n"); + } else if (!is_valid_ether_addr(sa.sa_data)) { + netif_err(tp, probe, dev, "Invalid ether addr %pM\n", + sa.sa_data); + eth_hw_addr_random(dev); + ether_addr_copy(sa.sa_data, dev->dev_addr); + ret = rtl8152_set_mac_address(dev, &sa); + netif_info(tp, probe, dev, "Random ether addr %pM\n", + sa.sa_data); + } else { + if (tp->version == RTL_VER_01) + ether_addr_copy(dev->dev_addr, sa.sa_data); + else + ret = rtl8152_set_mac_address(dev, &sa); + } + + return ret; +} + static void read_bulk_callback(struct urb *urb) { struct net_device *netdev; @@ -1248,13 +1253,13 @@ static int alloc_all_mem(struct r8152 *tp) skb_queue_head_init(&tp->tx_queue); for (i = 0; i < RTL8152_MAX_RX; i++) { - buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; if (buf != rx_agg_align(buf)) { kfree(buf); - buf = kmalloc_node(rx_buf_sz + RX_ALIGN, GFP_KERNEL, + buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL, node); if (!buf) goto err1; @@ -1274,13 +1279,13 @@ static int alloc_all_mem(struct r8152 *tp) } for (i = 0; i < RTL8152_MAX_TX; i++) { - buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node); if (!buf) goto err1; if (buf != tx_agg_align(buf)) { kfree(buf); - buf = kmalloc_node(rx_buf_sz + TX_ALIGN, GFP_KERNEL, + buf = kmalloc_node(agg_buf_sz + TX_ALIGN, GFP_KERNEL, node); if (!buf) goto err1; @@ -1311,8 +1316,8 @@ static int alloc_all_mem(struct r8152 *tp) tp->intr_interval = (int)ep_intr->desc.bInterval; usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3), - tp->intr_buff, INTBUFSIZE, intr_callback, - tp, tp->intr_interval); + tp->intr_buff, INTBUFSIZE, intr_callback, + tp, tp->intr_interval); return 0; @@ -1354,8 +1359,7 @@ static inline __be16 get_protocol(struct sk_buff *skb) return protocol; } -/* - * r8152_csum_workaround() +/* r8152_csum_workaround() * The hw limites the value the transport offset. When the offset is out of the * range, calculate the checksum by sw. */ @@ -1398,8 +1402,7 @@ drop: } } -/* - * msdn_giant_send_check() +/* msdn_giant_send_check() * According to the document of microsoft, the TCP Pseudo Header excludes the * packet length for IPv6 TCP large packets. 
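/* A sketch of the standard approach behind that comment (the body of
 * msdn_giant_send_check() is not fully shown in this hunk): the TCP
 * checksum seed for an IPv6 TSO packet is computed over a pseudo header
 * whose length field is zero:
 *
 *	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
 */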
*/ @@ -1422,6 +1425,25 @@ static int msdn_giant_send_check(struct sk_buff *skb) return ret; } +static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb) +{ + if (vlan_tx_tag_present(skb)) { + u32 opts2; + + opts2 = TX_VLAN_TAG | swab16(vlan_tx_tag_get(skb)); + desc->opts2 |= cpu_to_le32(opts2); + } +} + +static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RX_VLAN_TAG) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + swab16(opts2 & 0xffff)); +} + static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb, u32 len, u32 transport_offset) { @@ -1518,8 +1540,9 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) spin_unlock(&tx_queue->lock); tx_data = agg->head; - agg->skb_num = agg->skb_len = 0; - remain = rx_buf_sz; + agg->skb_num = 0; + agg->skb_len = 0; + remain = agg_buf_sz; while (remain >= ETH_ZLEN + sizeof(struct tx_desc)) { struct tx_desc *tx_desc; @@ -1548,6 +1571,8 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) continue; } + rtl_tx_vlan_tag(tx_desc, skb); + tx_data += sizeof(*tx_desc); len = skb->len; @@ -1566,7 +1591,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg) dev_kfree_skb_any(skb); - remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); + remain = agg_buf_sz - (int)(tx_agg_align(tx_data) - agg->head); } if (!skb_queue_empty(&skb_head)) { @@ -1689,6 +1714,7 @@ static void rx_bottom(struct r8152 *tp) memcpy(skb->data, rx_data, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, netdev); + rtl_rx_vlan_tag(rx_desc, skb); netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += pkt_len; @@ -1772,8 +1798,8 @@ static int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags) { usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1), - agg->head, rx_buf_sz, - (usb_complete_t)read_bulk_callback, agg); + agg->head, agg_buf_sz, + (usb_complete_t)read_bulk_callback, agg); return usb_submit_urb(agg->urb, mem_flags); } @@ -1835,18 +1861,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) /* Unconditionally log net taps. */ netif_notice(tp, link, netdev, "Promiscuous mode enabled\n"); ocp_data |= RCR_AM | RCR_AAP; - mc_filter[1] = mc_filter[0] = 0xffffffff; + mc_filter[1] = 0xffffffff; + mc_filter[0] = 0xffffffff; } else if ((netdev_mc_count(netdev) > multicast_filter_limit) || (netdev->flags & IFF_ALLMULTI)) { /* Too many to filter perfectly -- accept all multicasts. 
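/* Note on the rtl_tx_vlan_tag()/rtl_rx_vlan_tag() helpers added just
 * above: opts2 is a little-endian dword, and the swab16() on both the tx
 * and rx paths suggests the chip carries the VLAN TCI in network byte
 * order inside it. Reduced to its essentials:
 *
 *	// tx: desc->opts2 |= cpu_to_le32(TX_VLAN_TAG | swab16(tci));
 *	// rx: tci = swab16(le32_to_cpu(desc->opts2) & 0xffff);
 */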
*/ ocp_data |= RCR_AM; - mc_filter[1] = mc_filter[0] = 0xffffffff; + mc_filter[1] = 0xffffffff; + mc_filter[0] = 0xffffffff; } else { struct netdev_hw_addr *ha; - mc_filter[1] = mc_filter[0] = 0; + mc_filter[1] = 0; + mc_filter[0] = 0; netdev_for_each_mc_addr(ha, netdev) { int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); ocp_data |= RCR_AM; } @@ -1861,7 +1891,7 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) } static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, - struct net_device *netdev) + struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); @@ -1877,8 +1907,9 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, usb_mark_last_busy(tp->udev); tasklet_schedule(&tp->tl); } - } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) + } else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) { netif_stop_queue(netdev); + } return NETDEV_TX_OK; } @@ -1903,7 +1934,7 @@ static void rtl8152_nic_reset(struct r8152 *tp) for (i = 0; i < 1000; i++) { if (!(ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CR) & CR_RST)) break; - udelay(100); + usleep_range(100, 400); } } @@ -1911,8 +1942,8 @@ static void set_tx_qlen(struct r8152 *tp) { struct net_device *netdev = tp->netdev; - tp->tx_qlen = rx_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN + - sizeof(struct tx_desc)); + tp->tx_qlen = agg_buf_sz / (netdev->mtu + VLAN_ETH_HLEN + VLAN_HLEN + + sizeof(struct tx_desc)); } static inline u8 rtl8152_get_speed(struct r8152 *tp) @@ -2044,13 +2075,13 @@ static void rtl_disable(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if ((ocp_data & FIFO_EMPTY) == FIFO_EMPTY) break; - mdelay(1); + usleep_range(1000, 2000); } for (i = 0; i < 1000; i++) { if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0) & TCR0_TX_EMPTY) break; - mdelay(1); + usleep_range(1000, 2000); } for (i = 0; i < RTL8152_MAX_RX; i++) @@ -2075,6 +2106,34 @@ static void r8152_power_cut_en(struct r8152 *tp, bool enable) ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data); } +static void rtl_rx_vlan_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); + if (enable) + ocp_data |= CPCR_RX_VLAN; + else + ocp_data &= ~CPCR_RX_VLAN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); +} + +static int rtl8152_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = features ^ dev->features; + struct r8152 *tp = netdev_priv(dev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + rtl_rx_vlan_en(tp, true); + else + rtl_rx_vlan_en(tp, false); + } + + return 0; +} + #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl_get_wol(struct r8152 *tp) @@ -2195,7 +2254,7 @@ static void rtl_clear_bp(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0); ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0); ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0); - mdelay(3); + usleep_range(3000, 6000); ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0); ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0); } @@ -2282,7 +2341,7 @@ static void r8152b_exit_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); @@ -2293,7 +2352,7 @@ static void r8152b_exit_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, 
PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } rtl8152_nic_reset(tp); @@ -2324,9 +2383,7 @@ static void r8152b_exit_oob(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_USB, USB_TX_DMA, TEST_MODE_DISABLE | TX_SIZE_ADJUST1); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - ocp_data &= ~CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); @@ -2354,7 +2411,7 @@ static void r8152b_enter_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); @@ -2365,14 +2422,12 @@ static void r8152b_enter_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - ocp_data |= CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR); ocp_data |= ALDPS_PROXY_MODE; @@ -2509,7 +2564,7 @@ static void r8153_first_init(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); @@ -2520,12 +2575,10 @@ static void r8153_first_init(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - ocp_data &= ~CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); @@ -2564,7 +2617,7 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); @@ -2575,7 +2628,7 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); if (ocp_data & LINK_LIST_READY) break; - mdelay(1); + usleep_range(1000, 2000); } ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); @@ -2584,9 +2637,7 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data &= ~TEREDO_WAKE_MASK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data); - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR); - ocp_data |= CPCR_RX_VLAN; - ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data); + rtl_rx_vlan_en(tp, true); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR); ocp_data |= ALDPS_PROXY_MODE; @@ -2886,8 +2937,7 @@ static int rtl8152_close(struct net_device *netdev) if (res < 0) { rtl_drop_queued_tx(tp); } else { - /* - * The autosuspend may have been enabled and wouldn't + /* The autosuspend may have been enabled and wouldn't * be disable when autoresume occurs, because the * netif_running() would be false. 
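/* On the mdelay() -> usleep_range() conversions throughout this file:
 * these polling loops run in process context, so sleeping with a range
 * lets the scheduler coalesce timer wakeups instead of busy-waiting.
 * The recurring pattern, as used above:
 *
 *	for (i = 0; i < 1000; i++) {
 *		ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
 *		if (ocp_data & LINK_LIST_READY)
 *			break;
 *		usleep_range(1000, 2000);	// sleep 1-2 ms per iteration
 *	}
 */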
*/ @@ -3113,8 +3163,9 @@ static int rtl8152_resume(struct usb_interface *intf) } else { tp->rtl_ops.up(tp); rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, - DUPLEX_FULL); + tp->mii.supports_gmii ? + SPEED_1000 : SPEED_100, + DUPLEX_FULL); } tp->speed = 0; netif_carrier_off(tp->netdev); @@ -3175,8 +3226,8 @@ static void rtl8152_get_drvinfo(struct net_device *netdev, { struct r8152 *tp = netdev_priv(netdev); - strncpy(info->driver, MODULENAME, ETHTOOL_BUSINFO_LEN); - strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(tp->udev, info->bus_info, sizeof(info->bus_info)); } @@ -3351,6 +3402,7 @@ static const struct net_device_ops rtl8152_netdev_ops = { .ndo_do_ioctl = rtl8152_ioctl, .ndo_start_xmit = rtl8152_start_xmit, .ndo_tx_timeout = rtl8152_tx_timeout, + .ndo_set_features = rtl8152_set_features, .ndo_set_rx_mode = rtl8152_set_rx_mode, .ndo_set_mac_address = rtl8152_set_mac_address, .ndo_change_mtu = rtl8152_change_mtu, @@ -3505,10 +3557,16 @@ static int rtl8152_probe(struct usb_interface *intf, netdev->features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | NETIF_F_IPV6_CSUM | - NETIF_F_TSO6; + NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_FRAGLIST | - NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | + NETIF_F_IPV6_CSUM | NETIF_F_TSO6; netdev->ethtool_ops = &ops; netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 59caa06f34a6..3d0ce4468ce6 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -546,8 +546,8 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) skb_put(skb, GOOD_PACKET_LEN); hdr = skb_vnet_hdr(skb); + sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); - skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); @@ -563,6 +563,8 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) char *p; int i, err, offset; + sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); + /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { first = get_a_page(rq, gfp); @@ -899,6 +901,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) if (vi->mergeable_rx_bufs) hdr->mhdr.num_buffers = 0; + sg_init_table(sq->sg, MAX_SKB_FRAGS + 2); if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); @@ -934,7 +937,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - virtqueue_kick(sq->vq); /* Don't wait up for transmitted skbs to be freed. 
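/* Context for the virtqueue_kick() movement in the virtio_net hunk
 * above: skb->xmit_more is set by the core when further packets will
 * follow immediately, so the doorbell can be deferred and several
 * buffers posted per kick:
 *
 *	if (__netif_subqueue_stopped(dev, qnum) || !skb->xmit_more)
 *		virtqueue_kick(sq->vq);	// flush now; otherwise keep batching
 */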
*/ skb_orphan(skb); @@ -954,6 +956,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) } } + if (__netif_subqueue_stopped(dev, qnum) || !skb->xmit_more) + virtqueue_kick(sq->vq); + return NETDEV_TX_OK; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index beb377b2d4b7..39c86536fb9b 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -42,6 +42,7 @@ #include <net/netns/generic.h> #include <net/vxlan.h> #include <net/protocol.h> +#include <net/udp_tunnel.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/addrconf.h> @@ -1062,7 +1063,6 @@ void vxlan_sock_release(struct vxlan_sock *vs) spin_lock(&vn->sock_lock); hlist_del_rcu(&vs->hlist); - rcu_assign_sk_user_data(vs->sock->sk, NULL); vxlan_notify_del_rx_port(vs); spin_unlock(&vn->sock_lock); @@ -1158,8 +1158,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!vs) goto drop; - skb_pop_rcv_encapsulation(skb); - vs->rcv(vs, skb, vxh->vx_vni); return 0; @@ -1338,7 +1336,6 @@ out: } #if IS_ENABLED(CONFIG_IPV6) - static struct sk_buff *vxlan_na_create(struct sk_buff *request, struct neighbour *n, bool isrouter) { @@ -1572,13 +1569,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) return false; } -static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb, - bool udp_csum) -{ - int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - return iptunnel_handle_offloads(skb, udp_csum, type); -} - #if IS_ENABLED(CONFIG_IPV6) static int vxlan6_xmit_skb(struct vxlan_sock *vs, struct dst_entry *dst, struct sk_buff *skb, @@ -1587,13 +1577,12 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, __be16 src_port, __be16 dst_port, __be32 vni, bool xnet) { - struct ipv6hdr *ip6h; struct vxlanhdr *vxh; - struct udphdr *uh; int min_headroom; int err; + bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk); - skb = vxlan_handle_offloads(skb, !udp_get_no_check6_tx(vs->sock->sk)); + skb = udp_tunnel_handle_offloads(skb, udp_sum); if (IS_ERR(skb)) return -EINVAL; @@ -1621,38 +1610,8 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs, vxh->vx_flags = htonl(VXLAN_FLAGS); vxh->vx_vni = vni; - __skb_push(skb, sizeof(*uh)); - skb_reset_transport_header(skb); - uh = udp_hdr(skb); - - uh->dest = dst_port; - uh->source = src_port; - - uh->len = htons(skb->len); - - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | - IPSKB_REROUTED); - skb_dst_set(skb, dst); - - udp6_set_csum(udp_get_no_check6_tx(vs->sock->sk), skb, - saddr, daddr, skb->len); - - __skb_push(skb, sizeof(*ip6h)); - skb_reset_network_header(skb); - ip6h = ipv6_hdr(skb); - ip6h->version = 6; - ip6h->priority = prio; - ip6h->flow_lbl[0] = 0; - ip6h->flow_lbl[1] = 0; - ip6h->flow_lbl[2] = 0; - ip6h->payload_len = htons(skb->len); - ip6h->nexthdr = IPPROTO_UDP; - ip6h->hop_limit = ttl; - ip6h->daddr = *daddr; - ip6h->saddr = *saddr; - - ip6tunnel_xmit(skb, dev); + udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio, + ttl, src_port, dst_port); return 0; } #endif @@ -1663,11 +1622,11 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, __be16 src_port, __be16 dst_port, __be32 vni, bool xnet) { struct vxlanhdr *vxh; - struct udphdr *uh; int min_headroom; int err; + bool udp_sum = !vs->sock->sk->sk_no_check_tx; - skb = vxlan_handle_offloads(skb, !vs->sock->sk->sk_no_check_tx); + skb = udp_tunnel_handle_offloads(skb, udp_sum); if (IS_ERR(skb)) return -EINVAL; @@ -1693,20 +1652,8 @@ int 
vxlan_xmit_skb(struct vxlan_sock *vs, vxh->vx_flags = htonl(VXLAN_FLAGS); vxh->vx_vni = vni; - __skb_push(skb, sizeof(*uh)); - skb_reset_transport_header(skb); - uh = udp_hdr(skb); - - uh->dest = dst_port; - uh->source = src_port; - - uh->len = htons(skb->len); - - udp_set_csum(vs->sock->sk->sk_no_check_tx, skb, - src, dst, skb->len); - - return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP, - tos, ttl, df, xnet); + return udp_tunnel_xmit_skb(vs->sock, rt, skb, src, dst, tos, + ttl, df, src_port, dst_port, xnet); } EXPORT_SYMBOL_GPL(vxlan_xmit_skb); @@ -1831,11 +1778,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); - err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb, - fl4.saddr, dst->sin.sin_addr.s_addr, - tos, ttl, df, src_port, dst_port, - htonl(vni << 8), - !net_eq(vxlan->net, dev_net(vxlan->dev))); + err = udp_tunnel_xmit_skb(vxlan->vn_sock->sock, rt, skb, + fl4.saddr, dst->sin.sin_addr.s_addr, + tos, ttl, df, src_port, dst_port, + !net_eq(vxlan->net, + dev_net(vxlan->dev))); if (err < 0) goto rt_tx_error; @@ -2335,8 +2282,7 @@ static const struct ethtool_ops vxlan_ethtool_ops = { static void vxlan_del_work(struct work_struct *work) { struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work); - - sk_release_kernel(vs->sock->sk); + udp_tunnel_sock_release(vs->sock); kfree_rcu(vs, rcu); } @@ -2369,9 +2315,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, if (err < 0) return ERR_PTR(err); - /* Disable multicast loopback */ - inet_sk(sock->sk)->mc_loop = 0; - return sock; } @@ -2383,9 +2326,9 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; struct socket *sock; - struct sock *sk; unsigned int h; bool ipv6 = !!(flags & VXLAN_F_IPV6); + struct udp_tunnel_sock_cfg tunnel_cfg; vs = kzalloc(sizeof(*vs), GFP_KERNEL); if (!vs) @@ -2403,11 +2346,9 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, } vs->sock = sock; - sk = sock->sk; atomic_set(&vs->refcnt, 1); vs->rcv = rcv; vs->data = data; - rcu_assign_sk_user_data(vs->sock->sk, vs); /* Initialize the vxlan udp offloads structure */ vs->udp_offloads.port = port; @@ -2420,14 +2361,12 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port, spin_unlock(&vn->sock_lock); /* Mark socket as an encapsulation socket. 
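/* The conversion below fills a stack struct udp_tunnel_sock_cfg field by
 * field, which is why encap_destroy must be set to NULL explicitly; a
 * designated initializer would zero the unnamed members implicitly.
 * Equivalent sketch (style note only, no behavioural difference):
 *
 *	struct udp_tunnel_sock_cfg tunnel_cfg = {
 *		.sk_user_data = vs,
 *		.encap_type   = 1,
 *		.encap_rcv    = vxlan_udp_encap_recv,
 *	};
 *	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 */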
*/ - udp_sk(sk)->encap_type = 1; - udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; -#if IS_ENABLED(CONFIG_IPV6) - if (ipv6) - ipv6_stub->udpv6_encap_enable(); - else -#endif - udp_encap_enable(); + tunnel_cfg.sk_user_data = vs; + tunnel_cfg.encap_type = 1; + tunnel_cfg.encap_rcv = vxlan_udp_encap_recv; + tunnel_cfg.encap_destroy = NULL; + + setup_udp_tunnel_sock(net, sock, &tunnel_cfg); return vs; } diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 43c9960dce1c..ae6ecf401189 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -192,8 +192,10 @@ static netdev_tx_t dlci_transmit(struct sk_buff *skb, struct net_device *dev) { struct dlci_local *dlp = netdev_priv(dev); - if (skb) - dlp->slave->netdev_ops->ndo_start_xmit(skb, dlp->slave); + if (skb) { + struct netdev_queue *txq = skb_get_tx_queue(dev, skb); + netdev_start_xmit(skb, dlp->slave, txq, false); + } return NETDEV_TX_OK; } diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index fd9e5305e77f..c1a4ade32772 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -261,6 +261,7 @@ enum ATH_DEBUG { ATH_DBG_MCI = 0x00008000, ATH_DBG_DFS = 0x00010000, ATH_DBG_WOW = 0x00020000, + ATH_DBG_CHAN_CTX = 0x00040000, ATH_DBG_ANY = 0xffffffff }; diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index a6f5285235af..1053bb5f2cdc 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -25,6 +25,7 @@ config ATH10K_DEBUG config ATH10K_DEBUGFS bool "Atheros ath10k debugfs support" depends on ATH10K + select RELAY ---help--- Enabled debugfs support diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index a4179f49ee1f..2cfb63ca9327 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -10,6 +10,7 @@ ath10k_core-y += mac.o \ wmi.o \ bmi.o +ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index 17d221abd58c..3d29b0875b3e 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -22,7 +22,7 @@ void ath10k_bmi_start(struct ath10k *ar) { - ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n"); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n"); ar->bmi.done_sent = false; } @@ -33,10 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar) u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done); int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n"); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n"); if (ar->bmi.done_sent) { - ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n"); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n"); return 0; } @@ -45,7 +45,7 @@ int ath10k_bmi_done(struct ath10k *ar) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device: %d\n", ret); + ath10k_warn(ar, "unable to write to the device: %d\n", ret); return ret; } @@ -61,10 +61,10 @@ int ath10k_bmi_get_target_info(struct ath10k *ar, u32 resplen = sizeof(resp.get_target_info); int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n"); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n"); if (ar->bmi.done_sent) { - ath10k_warn("BMI Get Target Info Command disallowed\n"); + ath10k_warn(ar, "BMI Get Target Info Command disallowed\n"); return -EBUSY; } @@ -72,12 +72,12 @@ int 
ath10k_bmi_get_target_info(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); if (ret) { - ath10k_warn("unable to get target info from device\n"); + ath10k_warn(ar, "unable to get target info from device\n"); return ret; } if (resplen < sizeof(resp.get_target_info)) { - ath10k_warn("invalid get_target_info response length (%d)\n", + ath10k_warn(ar, "invalid get_target_info response length (%d)\n", resplen); return -EIO; } @@ -97,11 +97,11 @@ int ath10k_bmi_read_memory(struct ath10k *ar, u32 rxlen; int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n", address, length); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -115,7 +115,7 @@ int ath10k_bmi_read_memory(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &rxlen); if (ret) { - ath10k_warn("unable to read from the device (%d)\n", + ath10k_warn(ar, "unable to read from the device (%d)\n", ret); return ret; } @@ -137,11 +137,11 @@ int ath10k_bmi_write_memory(struct ath10k *ar, u32 txlen; int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n", address, length); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -159,7 +159,7 @@ int ath10k_bmi_write_memory(struct ath10k *ar, ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device (%d)\n", + ath10k_warn(ar, "unable to write to the device (%d)\n", ret); return ret; } @@ -183,11 +183,11 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result) u32 resplen = sizeof(resp.execute); int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n", address, param); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -197,19 +197,19 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); if (ret) { - ath10k_warn("unable to read from the device\n"); + ath10k_warn(ar, "unable to read from the device\n"); return ret; } if (resplen < sizeof(resp.execute)) { - ath10k_warn("invalid execute response length (%d)\n", + ath10k_warn(ar, "invalid execute response length (%d)\n", resplen); return -EIO; } *result = __le32_to_cpu(resp.execute.result); - ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result); + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result); return 0; } @@ -221,11 +221,11 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) u32 txlen; int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n", buffer, length); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -241,7 +241,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen, NULL, NULL); if (ret) { - ath10k_warn("unable to write to the device\n"); + ath10k_warn(ar, "unable to write to the device\n"); return 
ret; } @@ -258,11 +258,11 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address) u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start); int ret; - ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n", address); if (ar->bmi.done_sent) { - ath10k_warn("command disallowed\n"); + ath10k_warn(ar, "command disallowed\n"); return -EBUSY; } @@ -271,7 +271,7 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address) ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); if (ret) { - ath10k_warn("unable to Start LZ Stream to the device\n"); + ath10k_warn(ar, "unable to Start LZ Stream to the device\n"); return ret; } @@ -286,7 +286,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar, u32 trailer_len = length - head_len; int ret; - ath10k_dbg(ATH10K_DBG_BMI, + ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi fast download address 0x%x buffer 0x%p length %d\n", address, buffer, length); diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 4333107ecf37..71eef233bd01 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -284,13 +284,9 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, int ret = 0; if (nbytes > ce_state->src_sz_max) - ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n", + ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", __func__, nbytes, ce_state->src_sz_max); - ret = ath10k_pci_wake(ar); - if (ret) - return ret; - if (unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) <= 0)) { ret = -ENOSR; @@ -325,7 +321,6 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, src_ring->write_index = write_index; exit: - ath10k_pci_sleep(ar); return ret; } @@ -390,49 +385,57 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) return delta; } -int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state, - void *per_recv_context, - u32 buffer) + +int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) { - struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; - u32 ctrl_addr = ce_state->ctrl_addr; - struct ath10k *ar = ce_state->ar; + struct ath10k *ar = pipe->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; unsigned int nentries_mask = dest_ring->nentries_mask; - unsigned int write_index; - unsigned int sw_index; - int ret; + unsigned int write_index = dest_ring->write_index; + unsigned int sw_index = dest_ring->sw_index; - spin_lock_bh(&ar_pci->ce_lock); - write_index = dest_ring->write_index; - sw_index = dest_ring->sw_index; + lockdep_assert_held(&ar_pci->ce_lock); - ret = ath10k_pci_wake(ar); - if (ret) - goto out; + return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); +} - if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) { - struct ce_desc *base = dest_ring->base_addr_owner_space; - struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); +int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_ring *dest_ring = pipe->dest_ring; + unsigned int nentries_mask = dest_ring->nentries_mask; + unsigned int write_index = dest_ring->write_index; + unsigned int sw_index = dest_ring->sw_index; + struct ce_desc *base = dest_ring->base_addr_owner_space; + struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index); + u32 ctrl_addr = 
pipe->ctrl_addr; - /* Update destination descriptor */ - desc->addr = __cpu_to_le32(buffer); - desc->nbytes = 0; + lockdep_assert_held(&ar_pci->ce_lock); - dest_ring->per_transfer_context[write_index] = - per_recv_context; + if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0) + return -EIO; - /* Update Destination Ring Write Index */ - write_index = CE_RING_IDX_INCR(nentries_mask, write_index); - ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); - dest_ring->write_index = write_index; - ret = 0; - } else { - ret = -EIO; - } - ath10k_pci_sleep(ar); + desc->addr = __cpu_to_le32(paddr); + desc->nbytes = 0; + + dest_ring->per_transfer_context[write_index] = ctx; + write_index = CE_RING_IDX_INCR(nentries_mask, write_index); + ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); + dest_ring->write_index = write_index; + + return 0; +} + +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr) +{ + struct ath10k *ar = pipe->ar; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; -out: + spin_lock_bh(&ar_pci->ce_lock); + ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr); spin_unlock_bh(&ar_pci->ce_lock); return ret; @@ -588,7 +591,6 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, unsigned int sw_index = src_ring->sw_index; struct ce_desc *sdesc, *sbase; unsigned int read_index; - int ret; if (src_ring->hw_index == sw_index) { /* @@ -599,18 +601,12 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, * value of the HW index has become stale. */ - ret = ath10k_pci_wake(ar); - if (ret) - return ret; - read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); if (read_index == 0xffffffff) return -ENODEV; read_index &= nentries_mask; src_ring->hw_index = read_index; - - ath10k_pci_sleep(ar); } read_index = src_ring->hw_index; @@ -731,11 +727,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; u32 ctrl_addr = ce_state->ctrl_addr; - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) - return; spin_lock_bh(&ar_pci->ce_lock); @@ -760,7 +751,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK); spin_unlock_bh(&ar_pci->ce_lock); - ath10k_pci_sleep(ar); } /* @@ -771,13 +761,9 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) void ath10k_ce_per_engine_service_any(struct ath10k *ar) { - int ce_id, ret; + int ce_id; u32 intr_summary; - ret = ath10k_pci_wake(ar); - if (ret) - return; - intr_summary = CE_INTERRUPT_SUMMARY(ar); for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) { @@ -789,8 +775,6 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) ath10k_ce_per_engine_service(ar, ce_id); } - - ath10k_pci_sleep(ar); } /* @@ -800,16 +784,11 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) * * Called with ce_lock held. 
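The copy-engine RX rework above is a textbook locked/unlocked API split: the double-underscore helpers (__ath10k_ce_rx_num_free_bufs(), __ath10k_ce_rx_post_buf()) require ce_lock to be held and document that requirement executably via lockdep_assert_held(), while plain ath10k_ce_rx_post_buf() is a thin wrapper that takes the lock itself. The shape of the convention, reduced to generic names:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct ring_state {
	spinlock_t lock;
	unsigned int write_index;	/* protected by lock */
};

/* caller must hold r->lock; lockdep turns the comment into a check */
static int __ring_post(struct ring_state *r)
{
	lockdep_assert_held(&r->lock);
	r->write_index++;		/* ring bookkeeping, no extra locking */
	return 0;
}

/* convenience wrapper for callers that do not already hold the lock */
static int ring_post(struct ring_state *r)
{
	int ret;

	spin_lock_bh(&r->lock);
	ret = __ring_post(r);
	spin_unlock_bh(&r->lock);

	return ret;
}

Callers that post many buffers can take the lock once and loop over the __ variant, which is what motivates exporting both forms from ce.h further below.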
*/ -static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state, - int disable_copy_compl_intr) +static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state) { u32 ctrl_addr = ce_state->ctrl_addr; struct ath10k *ar = ce_state->ar; - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) - return; + bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR; if ((!disable_copy_compl_intr) && (ce_state->send_cb || ce_state->recv_cb)) @@ -818,17 +797,11 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state, ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); ath10k_ce_watermark_intr_disable(ar, ctrl_addr); - - ath10k_pci_sleep(ar); } int ath10k_ce_disable_interrupts(struct ath10k *ar) { - int ce_id, ret; - - ret = ath10k_pci_wake(ar); - if (ret) - return ret; + int ce_id; for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { u32 ctrl_addr = ath10k_ce_base_address(ce_id); @@ -838,34 +811,16 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) ath10k_ce_watermark_intr_disable(ar, ctrl_addr); } - ath10k_pci_sleep(ar); - return 0; } -void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, - void (*send_cb)(struct ath10k_ce_pipe *), - int disable_interrupts) +void ath10k_ce_enable_interrupts(struct ath10k *ar) { - struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ce_id; - spin_lock_bh(&ar_pci->ce_lock); - ce_state->send_cb = send_cb; - ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts); - spin_unlock_bh(&ar_pci->ce_lock); -} - -void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state, - void (*recv_cb)(struct ath10k_ce_pipe *)) -{ - struct ath10k *ar = ce_state->ar; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - spin_lock_bh(&ar_pci->ce_lock); - ce_state->recv_cb = recv_cb; - ath10k_ce_per_engine_handler_adjust(ce_state, 0); - spin_unlock_bh(&ar_pci->ce_lock); + for (ce_id = 0; ce_id < CE_COUNT; ce_id++) + ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]); } static int ath10k_ce_init_src_ring(struct ath10k *ar, @@ -898,7 +853,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries); - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot init ce src ring id %d entries %d base_addr %p\n", ce_id, nentries, src_ring->base_addr_owner_space); @@ -932,7 +887,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0); ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries); - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ce dest ring id %d entries %d base_addr %p\n", ce_id, nentries, dest_ring->base_addr_owner_space); @@ -1067,7 +1022,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, * initialized by software/firmware. 
*/ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, - const struct ce_attr *attr) + const struct ce_attr *attr, + void (*send_cb)(struct ath10k_ce_pipe *), + void (*recv_cb)(struct ath10k_ce_pipe *)) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; @@ -1084,39 +1041,37 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC > (CE_HTT_H2T_MSG_SRC_NENTRIES - 1)); - ret = ath10k_pci_wake(ar); - if (ret) - return ret; - spin_lock_bh(&ar_pci->ce_lock); ce_state->ar = ar; ce_state->id = ce_id; ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); ce_state->attr_flags = attr->flags; ce_state->src_sz_max = attr->src_sz_max; + if (attr->src_nentries) + ce_state->send_cb = send_cb; + if (attr->dest_nentries) + ce_state->recv_cb = recv_cb; spin_unlock_bh(&ar_pci->ce_lock); if (attr->src_nentries) { ret = ath10k_ce_init_src_ring(ar, ce_id, attr); if (ret) { - ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n", + ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n", ce_id, ret); - goto out; + return ret; } } if (attr->dest_nentries) { ret = ath10k_ce_init_dest_ring(ar, ce_id, attr); if (ret) { - ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n", + ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n", ce_id, ret); - goto out; + return ret; } } -out: - ath10k_pci_sleep(ar); - return ret; + return 0; } static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) @@ -1140,16 +1095,8 @@ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) { - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) - return; - ath10k_ce_deinit_src_ring(ar, ce_id); ath10k_ce_deinit_dest_ring(ar, ce_id); - - ath10k_pci_sleep(ar); } int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, @@ -1163,7 +1110,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr); if (IS_ERR(ce_state->src_ring)) { ret = PTR_ERR(ce_state->src_ring); - ath10k_err("failed to allocate copy engine source ring %d: %d\n", + ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n", ce_id, ret); ce_state->src_ring = NULL; return ret; @@ -1175,7 +1122,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, attr); if (IS_ERR(ce_state->dest_ring)) { ret = PTR_ERR(ce_state->dest_ring); - ath10k_err("failed to allocate copy engine destination ring %d: %d\n", + ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n", ce_id, ret); ce_state->dest_ring = NULL; return ret; diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 7a5a36fc59c1..82d1f23546b9 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -162,30 +162,13 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe); -void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state, - void (*send_cb)(struct ath10k_ce_pipe *), - int disable_interrupts); - int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe); /*==================Recv=======================*/ -/* - * Make a buffer available to receive. The buffer must be at least of a - * minimal size appropriate for this copy engine (src_sz_max attribute). 
- * ce - which copy engine to use - * per_transfer_recv_context - context passed back to caller's recv_cb - * buffer - address of buffer in CE space - * Returns 0 on success; otherwise an error status. - * - * Implemenation note: Pushes a buffer to Dest ring. - */ -int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state, - void *per_transfer_recv_context, - u32 buffer); - -void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state, - void (*recv_cb)(struct ath10k_ce_pipe *)); +int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe); +int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr); /* recv flags */ /* Data is byte-swapped */ @@ -214,7 +197,9 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, /*==================CE Engine Initialization=======================*/ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, - const struct ce_attr *attr); + const struct ce_attr *attr, + void (*send_cb)(struct ath10k_ce_pipe *), + void (*recv_cb)(struct ath10k_ce_pipe *)); void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, const struct ce_attr *attr); @@ -245,6 +230,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, void ath10k_ce_per_engine_service_any(struct ath10k *ar); void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id); int ath10k_ce_disable_interrupts(struct ath10k *ar); +void ath10k_ce_enable_interrupts(struct ath10k *ar); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 93adb8c58969..651a6da8adf5 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -53,7 +53,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { static void ath10k_send_suspend_complete(struct ath10k *ar) { - ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n"); complete(&ar->target_suspend); } @@ -67,14 +67,14 @@ static int ath10k_init_configure_target(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_app_host_interest, HTC_PROTOCOL_VERSION); if (ret) { - ath10k_err("settings HTC version failed\n"); + ath10k_err(ar, "settings HTC version failed\n"); return ret; } /* set the firmware mode to STA/IBSS/AP */ ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host); if (ret) { - ath10k_err("setting firmware mode (1/2) failed\n"); + ath10k_err(ar, "setting firmware mode (1/2) failed\n"); return ret; } @@ -93,14 +93,14 @@ static int ath10k_init_configure_target(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_option_flag, param_host); if (ret) { - ath10k_err("setting firmware mode (2/2) failed\n"); + ath10k_err(ar, "setting firmware mode (2/2) failed\n"); return ret; } /* We do all byte-swapping on the host */ ret = ath10k_bmi_write32(ar, hi_be, 0); if (ret) { - ath10k_err("setting host CPU BE mode failed\n"); + ath10k_err(ar, "setting host CPU BE mode failed\n"); return ret; } @@ -108,7 +108,7 @@ static int ath10k_init_configure_target(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_fw_swap, 0); if (ret) { - ath10k_err("setting FW data/desc swap flags failed\n"); + ath10k_err(ar, "setting FW data/desc swap flags failed\n"); return ret; } @@ -146,11 +146,12 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) ret =
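Given the three declarations just exported from ce.h, a refill path can amortize the lock across a whole batch instead of paying a lock round-trip per buffer. A hedged usage sketch (buffer allocation and the location of ce_lock are driver details elided here):

/* sketch: batch-refill an RX ring under one lock acquisition */
static void refill_rx_sketch(struct ath10k_ce_pipe *pipe, spinlock_t *ce_lock)
{
	int num;

	spin_lock_bh(ce_lock);

	num = __ath10k_ce_rx_num_free_bufs(pipe);
	while (num--) {
		void *ctx = NULL;	/* per-buffer context (e.g. an skb) */
		u32 paddr = 0;		/* DMA address of a mapped buffer */

		/* real code maps a fresh buffer here before posting */
		if (__ath10k_ce_rx_post_buf(pipe, ctx, paddr))
			break;
	}

	spin_unlock_bh(ce_lock);
}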
ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr); if (ret) { - ath10k_err("could not read board ext data addr (%d)\n", ret); + ath10k_err(ar, "could not read board ext data addr (%d)\n", + ret); return ret; } - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot push board extended data addr 0x%x\n", board_ext_data_addr); @@ -158,7 +159,7 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) return 0; if (ar->board_len != (board_data_size + board_ext_data_size)) { - ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n", + ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n", ar->board_len, board_data_size, board_ext_data_size); return -EINVAL; } @@ -167,14 +168,15 @@ static int ath10k_push_board_ext_data(struct ath10k *ar) ar->board_data + board_data_size, board_ext_data_size); if (ret) { - ath10k_err("could not write board ext data (%d)\n", ret); + ath10k_err(ar, "could not write board ext data (%d)\n", ret); return ret; } ret = ath10k_bmi_write32(ar, hi_board_ext_data_config, (board_ext_data_size << 16) | 1); if (ret) { - ath10k_err("could not write board ext data bit (%d)\n", ret); + ath10k_err(ar, "could not write board ext data bit (%d)\n", + ret); return ret; } @@ -189,13 +191,13 @@ static int ath10k_download_board_data(struct ath10k *ar) ret = ath10k_push_board_ext_data(ar); if (ret) { - ath10k_err("could not push board ext data (%d)\n", ret); + ath10k_err(ar, "could not push board ext data (%d)\n", ret); goto exit; } ret = ath10k_bmi_read32(ar, hi_board_data, &address); if (ret) { - ath10k_err("could not read board data addr (%d)\n", ret); + ath10k_err(ar, "could not read board data addr (%d)\n", ret); goto exit; } @@ -203,13 +205,13 @@ static int ath10k_download_board_data(struct ath10k *ar) min_t(u32, board_data_size, ar->board_len)); if (ret) { - ath10k_err("could not write board data (%d)\n", ret); + ath10k_err(ar, "could not write board data (%d)\n", ret); goto exit; } ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1); if (ret) { - ath10k_err("could not write board data bit (%d)\n", ret); + ath10k_err(ar, "could not write board data bit (%d)\n", ret); goto exit; } @@ -225,30 +227,30 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) /* OTP is optional */ if (!ar->otp_data || !ar->otp_len) { - ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", + ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n", ar->otp_data, ar->otp_len); return 0; } - ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n", address, ar->otp_len); ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len); if (ret) { - ath10k_err("could not write otp (%d)\n", ret); + ath10k_err(ar, "could not write otp (%d)\n", ret); return ret; } ret = ath10k_bmi_execute(ar, address, 0, &result); if (ret) { - ath10k_err("could not execute otp (%d)\n", ret); + ath10k_err(ar, "could not execute otp (%d)\n", ret); return ret; } - ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result); if (result != 0) { - ath10k_err("otp calibration failed: %d", result); + ath10k_err(ar, "otp calibration failed: %d", result); return -EINVAL; } @@ -265,7 +267,7 @@ static int ath10k_download_fw(struct ath10k *ar) ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data, ar->firmware_len); if (ret) { - 
ath10k_err("could not write fw (%d)\n", ret); + ath10k_err(ar, "could not write fw (%d)\n", ret); goto exit; } @@ -302,12 +304,12 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) int ret = 0; if (ar->hw_params.fw.fw == NULL) { - ath10k_err("firmware file not defined\n"); + ath10k_err(ar, "firmware file not defined\n"); return -EINVAL; } if (ar->hw_params.fw.board == NULL) { - ath10k_err("board data file not defined"); + ath10k_err(ar, "board data file not defined"); return -EINVAL; } @@ -316,7 +318,7 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) ar->hw_params.fw.board); if (IS_ERR(ar->board)) { ret = PTR_ERR(ar->board); - ath10k_err("could not fetch board data (%d)\n", ret); + ath10k_err(ar, "could not fetch board data (%d)\n", ret); goto err; } @@ -328,7 +330,7 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) ar->hw_params.fw.fw); if (IS_ERR(ar->firmware)) { ret = PTR_ERR(ar->firmware); - ath10k_err("could not fetch firmware (%d)\n", ret); + ath10k_err(ar, "could not fetch firmware (%d)\n", ret); goto err; } @@ -344,7 +346,7 @@ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar) ar->hw_params.fw.otp); if (IS_ERR(ar->otp)) { ret = PTR_ERR(ar->otp); - ath10k_err("could not fetch otp (%d)\n", ret); + ath10k_err(ar, "could not fetch otp (%d)\n", ret); goto err; } @@ -369,7 +371,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) /* first fetch the firmware file (firmware-*.bin) */ ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name); if (IS_ERR(ar->firmware)) { - ath10k_err("could not fetch firmware file '%s/%s': %ld\n", + ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n", ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware)); return PTR_ERR(ar->firmware); } @@ -381,14 +383,14 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1; if (len < magic_len) { - ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n", + ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n", ar->hw_params.fw.dir, name, len); ret = -EINVAL; goto err; } if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) { - ath10k_err("invalid firmware magic\n"); + ath10k_err(ar, "invalid firmware magic\n"); ret = -EINVAL; goto err; } @@ -410,7 +412,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) data += sizeof(*hdr); if (len < ie_len) { - ath10k_err("invalid length for FW IE %d (%zu < %zu)\n", + ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n", ie_id, len, ie_len); ret = -EINVAL; goto err; @@ -424,7 +426,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) memcpy(ar->hw->wiphy->fw_version, data, ie_len); ar->hw->wiphy->fw_version[ie_len] = '\0'; - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw version %s\n", ar->hw->wiphy->fw_version); break; @@ -434,11 +436,11 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) timestamp = (__le32 *)data; - ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n", le32_to_cpup(timestamp)); break; case ATH10K_FW_IE_FEATURES: - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found firmware features ie (%zd B)\n", ie_len); @@ -450,19 +452,19 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) break; if (data[index] & (1 << bit)) { - 
ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "Enabling feature bit: %i\n", i); __set_bit(i, ar->fw_features); } } - ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "", + ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", ar->fw_features, sizeof(ar->fw_features)); break; case ATH10K_FW_IE_FW_IMAGE: - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw image ie (%zd B)\n", ie_len); @@ -471,7 +473,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) break; case ATH10K_FW_IE_OTP_IMAGE: - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "found otp image ie (%zd B)\n", ie_len); @@ -480,7 +482,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) break; default: - ath10k_warn("Unknown FW IE: %u\n", + ath10k_warn(ar, "Unknown FW IE: %u\n", le32_to_cpu(hdr->id)); break; } @@ -493,15 +495,22 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) } if (!ar->firmware_data || !ar->firmware_len) { - ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", + ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n", ar->hw_params.fw.dir, name); ret = -ENOMEDIUM; goto err; } + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) && + !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well"); + ret = -EINVAL; + goto err; + } + /* now fetch the board file */ if (ar->hw_params.fw.board == NULL) { - ath10k_err("board data file not defined"); + ath10k_err(ar, "board data file not defined"); ret = -EINVAL; goto err; } @@ -511,7 +520,7 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name) ar->hw_params.fw.board); if (IS_ERR(ar->board)) { ret = PTR_ERR(ar->board); - ath10k_err("could not fetch board data '%s/%s' (%d)\n", + ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n", ar->hw_params.fw.dir, ar->hw_params.fw.board, ret); goto err; @@ -531,22 +540,29 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar) { int ret; + ar->fw_api = 3; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + + ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE); + if (ret == 0) + goto success; + ar->fw_api = 2; - ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE); if (ret == 0) goto success; ar->fw_api = 1; - ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api); ret = ath10k_core_fetch_firmware_api_1(ar); if (ret) return ret; success: - ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api); return 0; } @@ -557,19 +573,19 @@ static int ath10k_init_download_firmware(struct ath10k *ar) ret = ath10k_download_board_data(ar); if (ret) { - ath10k_err("failed to download board data: %d\n", ret); + ath10k_err(ar, "failed to download board data: %d\n", ret); return ret; } ret = ath10k_download_and_run_otp(ar); if (ret) { - ath10k_err("failed to run otp: %d\n", ret); + ath10k_err(ar, "failed to run otp: %d\n", ret); return ret; } ret = ath10k_download_fw(ar); if (ret) { - ath10k_err("failed to download firmware: %d\n", ret); + ath10k_err(ar, "failed to download firmware: %d\n", ret); return ret; } @@ 
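For orientation, the firmware-N.bin container parsed above is a magic string followed by a flat list of (id, len, data) information elements, each read through le32 accessors as in the hunks here. A condensed sketch of the walk; the 4-byte padding between elements is an assumption of this sketch, not something visible in the excerpt:

#include <linux/kernel.h>

struct fw_ie_hdr {
	__le32 id;
	__le32 len;
	u8 data[];
};

static void walk_fw_ies_sketch(const u8 *data, size_t len)
{
	while (len >= sizeof(struct fw_ie_hdr)) {
		const struct fw_ie_hdr *hdr = (const void *)data;
		u32 ie_id = le32_to_cpu(hdr->id);
		u32 ie_len = le32_to_cpu(hdr->len);

		data += sizeof(*hdr);
		len -= sizeof(*hdr);

		if (ie_len > len)
			break;			/* truncated element */

		pr_debug("fw ie %u len %u\n", ie_id, ie_len);
		/* dispatch on ie_id as the parser above does */

		ie_len = ALIGN(ie_len, 4);	/* assumed inter-IE padding */
		data += min_t(size_t, ie_len, len);
		len -= min_t(size_t, ie_len, len);
	}
}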
-586,7 +602,7 @@ static int ath10k_init_uart(struct ath10k *ar) */ ret = ath10k_bmi_write32(ar, hi_serial_enable, 0); if (ret) { - ath10k_warn("could not disable UART prints (%d)\n", ret); + ath10k_warn(ar, "could not disable UART prints (%d)\n", ret); return ret; } @@ -595,24 +611,24 @@ static int ath10k_init_uart(struct ath10k *ar) ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7); if (ret) { - ath10k_warn("could not enable UART prints (%d)\n", ret); + ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); return ret; } ret = ath10k_bmi_write32(ar, hi_serial_enable, 1); if (ret) { - ath10k_warn("could not enable UART prints (%d)\n", ret); + ath10k_warn(ar, "could not enable UART prints (%d)\n", ret); return ret; } /* Set the UART baud rate to 19200. */ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200); if (ret) { - ath10k_warn("could not set the baud rate (%d)\n", ret); + ath10k_warn(ar, "could not set the baud rate (%d)\n", ret); return ret; } - ath10k_info("UART prints enabled\n"); + ath10k_info(ar, "UART prints enabled\n"); return 0; } @@ -629,14 +645,14 @@ static int ath10k_init_hw_params(struct ath10k *ar) } if (i == ARRAY_SIZE(ath10k_hw_params_list)) { - ath10k_err("Unsupported hardware version: 0x%x\n", + ath10k_err(ar, "Unsupported hardware version: 0x%x\n", ar->target_version); return -EINVAL; } ar->hw_params = *hw_params; - ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n", ar->hw_params.name, ar->target_version); return 0; @@ -651,14 +667,14 @@ static void ath10k_core_restart(struct work_struct *work) switch (ar->state) { case ATH10K_STATE_ON: ar->state = ATH10K_STATE_RESTARTING; - del_timer_sync(&ar->scan.timeout); - ath10k_reset_scan((unsigned long)ar); + ath10k_hif_stop(ar); + ath10k_scan_finish(ar); ieee80211_restart_hw(ar->hw); break; case ATH10K_STATE_OFF: /* this can happen if driver is being unloaded * or if the crash happens during FW probing */ - ath10k_warn("cannot restart a device that hasn't been started\n"); + ath10k_warn(ar, "cannot restart a device that hasn't been started\n"); break; case ATH10K_STATE_RESTARTING: /* hw restart might be requested from multiple places */ @@ -667,7 +683,7 @@ static void ath10k_core_restart(struct work_struct *work) ar->state = ATH10K_STATE_WEDGED; /* fall through */ case ATH10K_STATE_WEDGED: - ath10k_warn("device is wedged, will not restart\n"); + ath10k_warn(ar, "device is wedged, will not restart\n"); break; } @@ -700,7 +716,7 @@ int ath10k_core_start(struct ath10k *ar) status = ath10k_htc_init(ar); if (status) { - ath10k_err("could not init HTC (%d)\n", status); + ath10k_err(ar, "could not init HTC (%d)\n", status); goto err; } @@ -710,90 +726,91 @@ int ath10k_core_start(struct ath10k *ar) status = ath10k_wmi_attach(ar); if (status) { - ath10k_err("WMI attach failed: %d\n", status); + ath10k_err(ar, "WMI attach failed: %d\n", status); goto err; } status = ath10k_htt_init(ar); if (status) { - ath10k_err("failed to init htt: %d\n", status); + ath10k_err(ar, "failed to init htt: %d\n", status); goto err_wmi_detach; } status = ath10k_htt_tx_alloc(&ar->htt); if (status) { - ath10k_err("failed to alloc htt tx: %d\n", status); + ath10k_err(ar, "failed to alloc htt tx: %d\n", status); goto err_wmi_detach; } status = ath10k_htt_rx_alloc(&ar->htt); if (status) { - ath10k_err("failed to alloc htt rx: %d\n", status); + ath10k_err(ar, "failed to alloc htt rx: %d\n", status); goto err_htt_tx_detach; } status = ath10k_hif_start(ar); if (status) { 
- ath10k_err("could not start HIF: %d\n", status); + ath10k_err(ar, "could not start HIF: %d\n", status); goto err_htt_rx_detach; } status = ath10k_htc_wait_target(&ar->htc); if (status) { - ath10k_err("failed to connect to HTC: %d\n", status); + ath10k_err(ar, "failed to connect to HTC: %d\n", status); goto err_hif_stop; } status = ath10k_htt_connect(&ar->htt); if (status) { - ath10k_err("failed to connect htt (%d)\n", status); + ath10k_err(ar, "failed to connect htt (%d)\n", status); goto err_hif_stop; } status = ath10k_wmi_connect(ar); if (status) { - ath10k_err("could not connect wmi: %d\n", status); + ath10k_err(ar, "could not connect wmi: %d\n", status); goto err_hif_stop; } status = ath10k_htc_start(&ar->htc); if (status) { - ath10k_err("failed to start htc: %d\n", status); + ath10k_err(ar, "failed to start htc: %d\n", status); goto err_hif_stop; } status = ath10k_wmi_wait_for_service_ready(ar); if (status <= 0) { - ath10k_warn("wmi service ready event not received"); + ath10k_warn(ar, "wmi service ready event not received"); status = -ETIMEDOUT; - goto err_htc_stop; + goto err_hif_stop; } - ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n", ar->hw->wiphy->fw_version); status = ath10k_wmi_cmd_init(ar); if (status) { - ath10k_err("could not send WMI init command (%d)\n", status); - goto err_htc_stop; + ath10k_err(ar, "could not send WMI init command (%d)\n", + status); + goto err_hif_stop; } status = ath10k_wmi_wait_for_unified_ready(ar); if (status <= 0) { - ath10k_err("wmi unified ready event not received\n"); + ath10k_err(ar, "wmi unified ready event not received\n"); status = -ETIMEDOUT; - goto err_htc_stop; + goto err_hif_stop; } status = ath10k_htt_setup(&ar->htt); if (status) { - ath10k_err("failed to setup htt: %d\n", status); - goto err_htc_stop; + ath10k_err(ar, "failed to setup htt: %d\n", status); + goto err_hif_stop; } status = ath10k_debug_start(ar); if (status) - goto err_htc_stop; + goto err_hif_stop; if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1; @@ -802,28 +819,8 @@ int ath10k_core_start(struct ath10k *ar) INIT_LIST_HEAD(&ar->arvifs); - if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) { - ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n", - ar->hw_params.name, - ar->target_version, - ar->chip_id, - ar->hw->wiphy->fw_version, - ar->fw_api, - ar->htt.target_version_major, - ar->htt.target_version_minor); - ath10k_info("debug %d debugfs %d tracing %d dfs %d\n", - config_enabled(CONFIG_ATH10K_DEBUG), - config_enabled(CONFIG_ATH10K_DEBUGFS), - config_enabled(CONFIG_ATH10K_TRACING), - config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)); - } - - __set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags); - return 0; -err_htc_stop: - ath10k_htc_stop(&ar->htc); err_hif_stop: ath10k_hif_stop(ar); err_htt_rx_detach: @@ -845,14 +842,14 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt) ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt); if (ret) { - ath10k_warn("could not suspend target (%d)\n", ret); + ath10k_warn(ar, "could not suspend target (%d)\n", ret); return ret; } ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ); if (ret == 0) { - ath10k_warn("suspend timed out - target pause event never came\n"); + ath10k_warn(ar, "suspend timed out - target pause event never came\n"); return -ETIMEDOUT; } @@ -868,7 +865,6 @@ void ath10k_core_stop(struct ath10k *ar) ath10k_wait_for_suspend(ar, 
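The error-label churn in ath10k_core_start() above (every err_htc_stop becoming err_hif_stop) follows from HTC teardown moving inside ath10k_hif_stop(): a dedicated HTC label would now stop the transport twice. For readers new to the idiom, the unwind ladder looks like this in miniature; the step helpers are hypothetical:

static int step_a(struct ath10k *ar);	/* hypothetical bring-up steps */
static int step_b(struct ath10k *ar);
static void undo_a(struct ath10k *ar);

static int start_sketch(struct ath10k *ar)
{
	int ret;

	ret = step_a(ar);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = step_b(ar);
	if (ret)
		goto err_undo_a;	/* unwind in reverse order */

	return 0;

err_undo_a:
	undo_a(ar);
	return ret;
}

When one teardown helper absorbs another, the corresponding label is deleted and earlier failure sites retarget the surviving one, exactly as in the hunks above.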
WMI_PDEV_SUSPEND_AND_DISABLE_INTR); ath10k_debug_stop(ar); - ath10k_htc_stop(&ar->htc); ath10k_hif_stop(ar); ath10k_htt_tx_free(&ar->htt); ath10k_htt_rx_free(&ar->htt); @@ -887,14 +883,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ret = ath10k_hif_power_up(ar); if (ret) { - ath10k_err("could not start pci hif (%d)\n", ret); + ath10k_err(ar, "could not start pci hif (%d)\n", ret); return ret; } memset(&target_info, 0, sizeof(target_info)); ret = ath10k_bmi_get_target_info(ar, &target_info); if (ret) { - ath10k_err("could not get target info (%d)\n", ret); + ath10k_err(ar, "could not get target info (%d)\n", ret); ath10k_hif_power_down(ar); return ret; } @@ -904,14 +900,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ret = ath10k_init_hw_params(ar); if (ret) { - ath10k_err("could not get hw params (%d)\n", ret); + ath10k_err(ar, "could not get hw params (%d)\n", ret); ath10k_hif_power_down(ar); return ret; } ret = ath10k_core_fetch_firmware_files(ar); if (ret) { - ath10k_err("could not fetch firmware files (%d)\n", ret); + ath10k_err(ar, "could not fetch firmware files (%d)\n", ret); ath10k_hif_power_down(ar); return ret; } @@ -920,13 +916,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ret = ath10k_core_start(ar); if (ret) { - ath10k_err("could not init core (%d)\n", ret); + ath10k_err(ar, "could not init core (%d)\n", ret); ath10k_core_free_firmware_files(ar); ath10k_hif_power_down(ar); mutex_unlock(&ar->conf_mutex); return ret; } + ath10k_print_driver_info(ar); ath10k_core_stop(ar); mutex_unlock(&ar->conf_mutex); @@ -939,7 +936,7 @@ static int ath10k_core_check_chip_id(struct ath10k *ar) { u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV); - ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n", ar->chip_id, hw_revision); /* Check that we are not using hw1.0 (some of them have same pci id @@ -947,7 +944,7 @@ static int ath10k_core_check_chip_id(struct ath10k *ar) * due to missing hw1.0 workarounds. 
*/ switch (hw_revision) { case QCA988X_HW_1_0_CHIP_ID_REV: - ath10k_err("ERROR: qca988x hw1.0 is not supported\n"); + ath10k_err(ar, "ERROR: qca988x hw1.0 is not supported\n"); return -EOPNOTSUPP; case QCA988X_HW_2_0_CHIP_ID_REV: @@ -955,7 +952,7 @@ static int ath10k_core_check_chip_id(struct ath10k *ar) return 0; default: - ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n", + ath10k_warn(ar, "Warning: hardware revision unknown (0x%x), expect problems\n", ar->chip_id); return 0; } @@ -970,25 +967,33 @@ static void ath10k_core_register_work(struct work_struct *work) status = ath10k_core_probe_fw(ar); if (status) { - ath10k_err("could not probe fw (%d)\n", status); + ath10k_err(ar, "could not probe fw (%d)\n", status); goto err; } status = ath10k_mac_register(ar); if (status) { - ath10k_err("could not register to mac80211 (%d)\n", status); + ath10k_err(ar, "could not register to mac80211 (%d)\n", status); goto err_release_fw; } status = ath10k_debug_create(ar); if (status) { - ath10k_err("unable to initialize debugfs\n"); + ath10k_err(ar, "unable to initialize debugfs\n"); goto err_unregister_mac; } + status = ath10k_spectral_create(ar); + if (status) { + ath10k_err(ar, "failed to initialize spectral\n"); + goto err_debug_destroy; + } + set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags); return; +err_debug_destroy: + ath10k_debug_destroy(ar); err_unregister_mac: ath10k_mac_unregister(ar); err_release_fw: @@ -1008,7 +1013,7 @@ int ath10k_core_register(struct ath10k *ar, u32 chip_id) status = ath10k_core_check_chip_id(ar); if (status) { - ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id); + ath10k_err(ar, "Unsupported chip id 0x%08x\n", ar->chip_id); return status; } @@ -1025,6 +1030,12 @@ void ath10k_core_unregister(struct ath10k *ar) if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) return; + /* Stop spectral before unregistering from mac80211 to remove the + * relayfs debugfs file cleanly. Otherwise the parent debugfs tree + * would be already be free'd recursively, leading to a double free. + */ + ath10k_spectral_destroy(ar); + /* We must unregister from mac80211 before we stop HTC and HIF. * Otherwise we will fail to submit commands to FW and mac80211 will be * unhappy about callback failures. 
*/ @@ -1036,12 +1047,12 @@ void ath10k_core_unregister(struct ath10k *ar) } EXPORT_SYMBOL(ath10k_core_unregister); -struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, +struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, const struct ath10k_hif_ops *hif_ops) { struct ath10k *ar; - ar = ath10k_mac_create(); + ar = ath10k_mac_create(priv_size); if (!ar) return NULL; @@ -1051,7 +1062,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, ar->p2p = !!ath10k_p2p; ar->dev = dev; - ar->hif.priv = hif_priv; ar->hif.ops = hif_ops; init_completion(&ar->scan.started); @@ -1062,7 +1072,7 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, init_completion(&ar->install_key_done); init_completion(&ar->vdev_setup_done); - setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar); + INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work); ar->workqueue = create_singlethread_workqueue("ath10k_wq"); if (!ar->workqueue) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 83a5fa91531d..4ef476099225 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -22,6 +22,8 @@ #include <linux/if_ether.h> #include <linux/types.h> #include <linux/pci.h> +#include <linux/uuid.h> +#include <linux/time.h> #include "htt.h" #include "htc.h" @@ -31,6 +33,7 @@ #include "../ath.h" #include "../regd.h" #include "../dfs_pattern_detector.h" +#include "spectral.h" #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB) #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -237,6 +240,7 @@ struct ath10k_vif { bool is_started; bool is_up; + bool spectral_enabled; u32 aid; u8 bssid[ETH_ALEN]; @@ -276,11 +280,20 @@ struct ath10k_vif_iter { struct ath10k_vif *arvif; }; +/* used for crash-dump storage, protected by data-lock */ +struct ath10k_fw_crash_data { + bool crashed_since_read; + + uuid_le uuid; + struct timespec timestamp; + __le32 registers[REG_DUMP_COUNT_QCA988X]; +}; + struct ath10k_debug { struct dentry *debugfs_phy; struct ath10k_target_stats target_stats; - u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE]; + DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_BM_SIZE); struct completion event_stats_compl; @@ -293,6 +306,8 @@ struct ath10k_debug { u8 htt_max_amsdu; u8 htt_max_ampdu; + + struct ath10k_fw_crash_data *fw_crash_data; }; enum ath10k_state { @@ -330,6 +345,11 @@ enum ath10k_fw_features { /* Firmware does not support P2P */ ATH10K_FW_FEATURE_NO_P2P = 3, + /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature bit + * is required to be set as well. 
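The hif.priv pointer removed above is replaced by the trailing drv_priv[] array in struct ath10k: bus glue now tells ath10k_core_create() how much private space it needs and gets it allocated inline, pointer-aligned, in the same allocation as the core structure. A sketch of the consumer side, consistent with the ath10k_pci_priv() accessor used throughout this diff:

/* bus-private state lives in the drv_priv[] tail of struct ath10k */
static inline struct ath10k_pci *ath10k_pci_priv_sketch(struct ath10k *ar)
{
	return (struct ath10k_pci *)ar->drv_priv;
}

static struct ath10k *probe_sketch(struct device *dev,
				   const struct ath10k_hif_ops *ops)
{
	/* one allocation covers core + bus state; no separate hif.priv */
	return ath10k_core_create(sizeof(struct ath10k_pci), dev, ops);
}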
+ */ + ATH10K_FW_FEATURE_WMI_10_2 = 4, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -337,10 +357,32 @@ enum ath10k_fw_features { enum ath10k_dev_flags { /* Indicates that ath10k device is during CAC phase of DFS */ ATH10K_CAC_RUNNING, - ATH10K_FLAG_FIRST_BOOT_DONE, ATH10K_FLAG_CORE_REGISTERED, }; +enum ath10k_scan_state { + ATH10K_SCAN_IDLE, + ATH10K_SCAN_STARTING, + ATH10K_SCAN_RUNNING, + ATH10K_SCAN_ABORTING, +}; + +static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state) +{ + switch (state) { + case ATH10K_SCAN_IDLE: + return "idle"; + case ATH10K_SCAN_STARTING: + return "starting"; + case ATH10K_SCAN_RUNNING: + return "running"; + case ATH10K_SCAN_ABORTING: + return "aborting"; + } + + return "unknown"; +} + struct ath10k { struct ath_common ath_common; struct ieee80211_hw *hw; @@ -368,7 +410,6 @@ struct ath10k { bool p2p; struct { - void *priv; const struct ath10k_hif_ops *ops; } hif; @@ -410,10 +451,9 @@ struct ath10k { struct completion started; struct completion completed; struct completion on_channel; - struct timer_list timeout; + struct delayed_work timeout; + enum ath10k_scan_state state; bool is_roc; - bool in_progress; - bool aborting; int vdev_id; int roc_freq; } scan; @@ -494,9 +534,21 @@ struct ath10k { #ifdef CONFIG_ATH10K_DEBUGFS struct ath10k_debug debug; #endif + + struct { + /* relay(fs) channel for spectral scan */ + struct rchan *rfs_chan_spec_scan; + + /* spectral_mode and spec_config are protected by conf_mutex */ + enum ath10k_spectral_mode mode; + struct ath10k_spec_scan config; + } spectral; + + /* must be last */ + u8 drv_priv[0] __aligned(sizeof(void *)); }; -struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev, +struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, const struct ath10k_hif_ops *hif_ops); void ath10k_core_destroy(struct ath10k *ar); diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 3030158c478e..f3f0a80f8bab 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -17,6 +17,9 @@ #include <linux/module.h> #include <linux/debugfs.h> +#include <linux/version.h> +#include <linux/vermagic.h> +#include <linux/vmalloc.h> #include "core.h" #include "debug.h" @@ -24,25 +27,86 @@ /* ms */ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000 -static int ath10k_printk(const char *level, const char *fmt, ...) 
-{ - struct va_format vaf; - va_list args; - int rtn; +#define ATH10K_FW_CRASH_DUMP_VERSION 1 - va_start(args, fmt); +/** + * enum ath10k_fw_crash_dump_type - types of data in the dump file + * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format + */ +enum ath10k_fw_crash_dump_type { + ATH10K_FW_CRASH_DUMP_REGISTERS = 0, - vaf.fmt = fmt; - vaf.va = &args; + ATH10K_FW_CRASH_DUMP_MAX, +}; - rtn = printk("%sath10k: %pV", level, &vaf); +struct ath10k_tlv_dump_data { + /* see ath10k_fw_crash_dump_type above */ + __le32 type; - va_end(args); + /* in bytes */ + __le32 tlv_len; - return rtn; -} + /* pad to 32-bit boundaries as needed */ + u8 tlv_data[]; +} __packed; + +struct ath10k_dump_file_data { + /* dump file information */ + + /* "ATH10K-FW-DUMP" */ + char df_magic[16]; + + __le32 len; + + /* file dump version */ + __le32 version; + + /* some info we can get from ath10k struct that might help */ + + u8 uuid[16]; + + __le32 chip_id; + + /* 0 for now, in place for later hardware */ + __le32 bus_type; + + __le32 target_version; + __le32 fw_version_major; + __le32 fw_version_minor; + __le32 fw_version_release; + __le32 fw_version_build; + __le32 phy_capability; + __le32 hw_min_tx_power; + __le32 hw_max_tx_power; + __le32 ht_cap_info; + __le32 vht_cap_info; + __le32 num_rf_chains; + + /* firmware version string */ + char fw_ver[ETHTOOL_FWVERS_LEN]; + + /* Kernel related information */ + + /* time-of-day stamp */ + __le64 tv_sec; -int ath10k_info(const char *fmt, ...) + /* time-of-day stamp, nano-seconds */ + __le64 tv_nsec; + + /* LINUX_VERSION_CODE */ + __le32 kernel_ver_code; + + /* VERMAGIC_STRING */ + char kernel_ver[64]; + + /* room for growth w/out changing binary format */ + u8 unused[128]; + + /* struct ath10k_tlv_dump_data + more */ + u8 data[0]; +} __packed; + +int ath10k_info(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, @@ -52,7 +116,7 @@ int ath10k_info(const char *fmt, ...) va_start(args, fmt); vaf.va = &args; - ret = ath10k_printk(KERN_INFO, "%pV", &vaf); + ret = dev_info(ar->dev, "%pV", &vaf); trace_ath10k_log_info(&vaf); va_end(args); @@ -60,7 +124,25 @@ int ath10k_info(const char *fmt, ...) } EXPORT_SYMBOL(ath10k_info); -int ath10k_err(const char *fmt, ...) +void ath10k_print_driver_info(struct ath10k *ar) +{ + ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n", + ar->hw_params.name, + ar->target_version, + ar->chip_id, + ar->hw->wiphy->fw_version, + ar->fw_api, + ar->htt.target_version_major, + ar->htt.target_version_minor); + ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d\n", + config_enabled(CONFIG_ATH10K_DEBUG), + config_enabled(CONFIG_ATH10K_DEBUGFS), + config_enabled(CONFIG_ATH10K_TRACING), + config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)); +} +EXPORT_SYMBOL(ath10k_print_driver_info); + +int ath10k_err(struct ath10k *ar, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, @@ -70,7 +152,7 @@ int ath10k_err(const char *fmt, ...) va_start(args, fmt); vaf.va = &args; - ret = ath10k_printk(KERN_ERR, "%pV", &vaf); + ret = dev_err(ar->dev, "%pV", &vaf); trace_ath10k_log_err(&vaf); va_end(args); @@ -78,25 +160,21 @@ int ath10k_err(const char *fmt, ...) } EXPORT_SYMBOL(ath10k_err); -int ath10k_warn(const char *fmt, ...) +int ath10k_warn(struct ath10k *ar, const char *fmt, ...) 
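The logging conversion running through these hunks threads struct ath10k into every helper so output can go through the device-aware dev_*() printers (dev_info(), dev_err(), and below dev_warn_ratelimited()) instead of a bare printk with a hand-rolled "ath10k:" prefix; with several NICs in one host, each line now names the exact device. The %pV trick used by these wrappers forwards the caller's format string and arguments through a single printk:

#include <linux/device.h>

__printf(2, 3)
static int my_dev_log_sketch(struct ath10k *ar, const char *fmt, ...)
{
	struct va_format vaf = { .fmt = fmt };
	va_list args;

	va_start(args, fmt);
	vaf.va = &args;
	dev_info(ar->dev, "%pV", &vaf);	/* one printk, no double formatting */
	va_end(args);

	return 0;
}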
{ struct va_format vaf = { .fmt = fmt, }; va_list args; - int ret = 0; va_start(args, fmt); vaf.va = &args; - - if (net_ratelimit()) - ret = ath10k_printk(KERN_WARNING, "%pV", &vaf); - + dev_warn_ratelimited(ar->dev, "%pV", &vaf); trace_ath10k_log_warn(&vaf); va_end(args); - return ret; + return 0; } EXPORT_SYMBOL(ath10k_warn); @@ -115,9 +193,10 @@ static ssize_t ath10k_read_wmi_services(struct file *file, { struct ath10k *ar = file->private_data; char *buf; - unsigned int len = 0, buf_len = 1500; - const char *status; + unsigned int len = 0, buf_len = 4096; + const char *name; ssize_t ret_cnt; + bool enabled; int i; buf = kzalloc(buf_len, GFP_KERNEL); @@ -129,15 +208,22 @@ static ssize_t ath10k_read_wmi_services(struct file *file, if (len > buf_len) len = buf_len; - for (i = 0; i < WMI_SERVICE_LAST; i++) { - if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i)) - status = "enabled"; - else - status = "disabled"; + for (i = 0; i < WMI_MAX_SERVICE; i++) { + enabled = test_bit(i, ar->debug.wmi_service_bitmap); + name = wmi_service_name(i); + + if (!name) { + if (enabled) + len += scnprintf(buf + len, buf_len - len, + "%-40s %s (bit %d)\n", + "unknown", "enabled", i); + + continue; + } len += scnprintf(buf + len, buf_len - len, - "0x%02x - %20s - %s\n", - i, wmi_service_name(i), status); + "%-40s %s\n", + name, enabled ? "enabled" : "-"); } ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -309,7 +395,7 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf, ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT); if (ret) { - ath10k_warn("could not request stats (%d)\n", ret); + ath10k_warn(ar, "could not request stats (%d)\n", ret); goto exit; } @@ -527,11 +613,14 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, } if (!strcmp(buf, "soft")) { - ath10k_info("simulating soft firmware crash\n"); + ath10k_info(ar, "simulating soft firmware crash\n"); ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); } else if (!strcmp(buf, "hard")) { - ath10k_info("simulating hard firmware crash\n"); - ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1, + ath10k_info(ar, "simulating hard firmware crash\n"); + /* 0x7fff is vdev id, and it is always out of range for all + * firmware variants in order to force a firmware crash. 
+ */ + ret = ath10k_wmi_vdev_set_param(ar, 0x7fff, ar->wmi.vdev_param->rts_threshold, 0); } else { ret = -EINVAL; @@ -539,7 +628,7 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, } if (ret) { - ath10k_warn("failed to simulate firmware crash: %d\n", ret); + ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret); goto exit; } @@ -577,6 +666,138 @@ static const struct file_operations fops_chip_id = { .llseek = default_llseek, }; +struct ath10k_fw_crash_data * +ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; + + lockdep_assert_held(&ar->data_lock); + + crash_data->crashed_since_read = true; + uuid_le_gen(&crash_data->uuid); + getnstimeofday(&crash_data->timestamp); + + return crash_data; +} +EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data); + +static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data; + struct ath10k_dump_file_data *dump_data; + struct ath10k_tlv_dump_data *dump_tlv; + int hdr_len = sizeof(*dump_data); + unsigned int len, sofar = 0; + unsigned char *buf; + + len = hdr_len; + len += sizeof(*dump_tlv) + sizeof(crash_data->registers); + + sofar += hdr_len; + + /* This is going to get big when we start dumping FW RAM and such, + * so go ahead and use vmalloc. + */ + buf = vzalloc(len); + if (!buf) + return NULL; + + spin_lock_bh(&ar->data_lock); + + if (!crash_data->crashed_since_read) { + spin_unlock_bh(&ar->data_lock); + vfree(buf); + return NULL; + } + + dump_data = (struct ath10k_dump_file_data *)(buf); + strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP", + sizeof(dump_data->df_magic)); + dump_data->len = cpu_to_le32(len); + + dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); + + memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid)); + dump_data->chip_id = cpu_to_le32(ar->chip_id); + dump_data->bus_type = cpu_to_le32(0); + dump_data->target_version = cpu_to_le32(ar->target_version); + dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major); + dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor); + dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release); + dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build); + dump_data->phy_capability = cpu_to_le32(ar->phy_capability); + dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power); + dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power); + dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info); + dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info); + dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains); + + strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, + sizeof(dump_data->fw_ver)); + + dump_data->kernel_ver_code = cpu_to_le32(LINUX_VERSION_CODE); + strlcpy(dump_data->kernel_ver, VERMAGIC_STRING, + sizeof(dump_data->kernel_ver)); + + dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec); + dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec); + + /* Gather crash-dump */ + dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); + dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS); + dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers)); + memcpy(dump_tlv->tlv_data, &crash_data->registers, + sizeof(crash_data->registers)); + sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers); + + ar->debug.fw_crash_data->crashed_since_read = false; + + spin_unlock_bh(&ar->data_lock); + + return dump_data; +} + 
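Since the blob assembled above is meant to be pulled out of debugfs and decoded offline, a userspace-side sketch may be useful. The struct below is a trimmed copy of the kernel-side header (only the leading fields are declared, so the TLV area cannot be reached without the full layout); all on-disk integers are little-endian, hence le32toh(), which this sketch assumes is available via <endian.h>:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dump_hdr_sketch {	/* leading fields of ath10k_dump_file_data */
	char df_magic[16];	/* "ATH10K-FW-DUMP" */
	uint32_t len;		/* total blob length, little-endian */
	uint32_t version;	/* ATH10K_FW_CRASH_DUMP_VERSION */
};

static int check_dump_sketch(const uint8_t *buf, size_t sz)
{
	struct dump_hdr_sketch hdr;

	if (sz < sizeof(hdr))
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));

	if (memcmp(hdr.df_magic, "ATH10K-FW-DUMP", 15) != 0)
		return -1;

	printf("fw dump v%u, %u bytes\n",
	       le32toh(hdr.version), le32toh(hdr.len));
	/* (type, tlv_len, data) TLVs follow the full header */
	return 0;
}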
+static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file) +{ + struct ath10k *ar = inode->i_private; + struct ath10k_dump_file_data *dump; + + dump = ath10k_build_dump_file(ar); + if (!dump) + return -ENODATA; + + file->private_data = dump; + + return 0; +} + +static ssize_t ath10k_fw_crash_dump_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k_dump_file_data *dump_file = file->private_data; + + return simple_read_from_buffer(user_buf, count, ppos, + dump_file, + le32_to_cpu(dump_file->len)); +} + +static int ath10k_fw_crash_dump_release(struct inode *inode, + struct file *file) +{ + vfree(file->private_data); + + return 0; +} + +static const struct file_operations fops_fw_crash_dump = { + .open = ath10k_fw_crash_dump_open, + .read = ath10k_fw_crash_dump_read, + .release = ath10k_fw_crash_dump_release, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static int ath10k_debug_htt_stats_req(struct ath10k *ar) { u64 cookie; @@ -596,7 +817,7 @@ static int ath10k_debug_htt_stats_req(struct ath10k *ar) ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask, cookie); if (ret) { - ath10k_warn("failed to send htt stats request: %d\n", ret); + ath10k_warn(ar, "failed to send htt stats request: %d\n", ret); return ret; } @@ -770,7 +991,7 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file, if (ar->state == ATH10K_STATE_ON) { ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); if (ret) { - ath10k_warn("dbglog cfg failed from debugfs: %d\n", + ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", ret); goto exit; } @@ -801,13 +1022,14 @@ int ath10k_debug_start(struct ath10k *ar) ret = ath10k_debug_htt_stats_req(ar); if (ret) /* continue normally anyway, this isn't serious */ - ath10k_warn("failed to start htt stats workqueue: %d\n", ret); + ath10k_warn(ar, "failed to start htt stats workqueue: %d\n", + ret); if (ar->debug.fw_dbglog_mask) { ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); if (ret) /* not serious */ - ath10k_warn("failed to enable dbglog during start: %d", + ath10k_warn(ar, "failed to enable dbglog during start: %d", ret); } @@ -910,11 +1132,20 @@ static const struct file_operations fops_dfs_stats = { int ath10k_debug_create(struct ath10k *ar) { + int ret; + + ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data)); + if (!ar->debug.fw_crash_data) { + ret = -ENOMEM; + goto err; + } + ar->debug.debugfs_phy = debugfs_create_dir("ath10k", ar->hw->wiphy->debugfsdir); - - if (!ar->debug.debugfs_phy) - return -ENOMEM; + if (!ar->debug.debugfs_phy) { + ret = -ENOMEM; + goto err_free_fw_crash_data; + } INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork, ath10k_debug_htt_stats_dwork); @@ -930,6 +1161,9 @@ int ath10k_debug_create(struct ath10k *ar) debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash); + debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy, + ar, &fops_fw_crash_dump); + debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy, ar, &fops_chip_id); @@ -958,17 +1192,25 @@ int ath10k_debug_create(struct ath10k *ar) } return 0; + +err_free_fw_crash_data: + vfree(ar->debug.fw_crash_data); + +err: + return ret; } void ath10k_debug_destroy(struct ath10k *ar) { + vfree(ar->debug.fw_crash_data); cancel_delayed_work_sync(&ar->debug.htt_stats_dwork); } #endif /* CONFIG_ATH10K_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG -void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...) 
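The three fops above are a reusable debugfs pattern worth naming: snapshot at open() into file->private_data, serve it with simple_read_from_buffer() so offsets and short reads behave, free it at release(). The -ENODATA from open is what userspace sees when crashed_since_read is false, i.e. no new crash to fetch. As a generic template (build_snapshot() is hypothetical):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

struct snap { u32 len; u8 data[]; };		/* hypothetical payload */

static struct snap *build_snapshot(void *priv);	/* hypothetical builder */

static int snap_open(struct inode *inode, struct file *file)
{
	struct snap *s = build_snapshot(inode->i_private);

	if (!s)
		return -ENODATA;	/* nothing new to report */
	file->private_data = s;		/* immutable for this fd's lifetime */
	return 0;
}

static ssize_t snap_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	struct snap *s = file->private_data;

	return simple_read_from_buffer(ubuf, count, ppos, s->data, s->len);
}

static int snap_release(struct inode *inode, struct file *file)
{
	vfree(file->private_data);
	return 0;
}

static const struct file_operations snap_fops = {
	.open		= snap_open,
	.read		= snap_read,
	.release	= snap_release,
	.owner		= THIS_MODULE,
	.llseek		= default_llseek,
};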
+void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask, + const char *fmt, ...) { struct va_format vaf; va_list args; @@ -979,7 +1221,7 @@ void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...) vaf.va = &args; if (ath10k_debug_mask & mask) - ath10k_printk(KERN_DEBUG, "%pV", &vaf); + dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf); trace_ath10k_log_dbg(mask, &vaf); @@ -987,13 +1229,14 @@ void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...) } EXPORT_SYMBOL(ath10k_dbg); -void ath10k_dbg_dump(enum ath10k_debug_mask mask, +void ath10k_dbg_dump(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len) { if (ath10k_debug_mask & mask) { if (msg) - ath10k_dbg(mask, "%s\n", msg); + ath10k_dbg(ar, mask, "%s\n", msg); print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); } diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index a5824990bd2a..56746539bea2 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -39,9 +39,10 @@ enum ath10k_debug_mask { extern unsigned int ath10k_debug_mask; -__printf(1, 2) int ath10k_info(const char *fmt, ...); -__printf(1, 2) int ath10k_err(const char *fmt, ...); -__printf(1, 2) int ath10k_warn(const char *fmt, ...); +__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...); +__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...); +void ath10k_print_driver_info(struct ath10k *ar); #ifdef CONFIG_ATH10K_DEBUGFS int ath10k_debug_start(struct ath10k *ar); @@ -53,6 +54,10 @@ void ath10k_debug_read_service_map(struct ath10k *ar, size_t map_size); void ath10k_debug_read_target_stats(struct ath10k *ar, struct wmi_stats_event *ev); +struct ath10k_fw_crash_data * +ath10k_debug_get_new_fw_crash_data(struct ath10k *ar); + +void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len); #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++) @@ -86,25 +91,40 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar, { } +static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, + int len) +{ +} + +static inline struct ath10k_fw_crash_data * +ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) +{ + return NULL; +} + #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0) #endif /* CONFIG_ATH10K_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG -__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask, +__printf(3, 4) void ath10k_dbg(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *fmt, ...); -void ath10k_dbg_dump(enum ath10k_debug_mask mask, +void ath10k_dbg_dump(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len); #else /* CONFIG_ATH10K_DEBUG */ -static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask, +static inline int ath10k_dbg(struct ath10k *ar, + enum ath10k_debug_mask dbg_mask, const char *fmt, ...) 
{ return 0; } -static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask, +static inline void ath10k_dbg_dump(struct ath10k *ar, + enum ath10k_debug_mask mask, const char *msg, const char *prefix, const void *buf, size_t len) { diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 5fdc40d3b378..fd9a251f0659 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -46,7 +46,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE); if (!skb) { - ath10k_warn("Unable to allocate ctrl skb\n"); + ath10k_warn(ar, "Unable to allocate ctrl skb\n"); return NULL; } @@ -56,7 +56,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar) skb_cb = ATH10K_SKB_CB(skb); memset(skb_cb, 0, sizeof(*skb_cb)); - ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); + ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb); return skb; } @@ -72,13 +72,15 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc, static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, + struct ath10k *ar = ep->htc->ar; + + ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__, ep->eid, skb); ath10k_htc_restore_tx_skb(ep->htc, skb); if (!ep->ep_ops.ep_tx_complete) { - ath10k_warn("no tx handler for eid %d\n", ep->eid); + ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid); dev_kfree_skb_any(skb); return; } @@ -89,12 +91,14 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep, /* assumes tx_lock is held */ static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep) { + struct ath10k *ar = ep->htc->ar; + if (!ep->tx_credit_flow_enabled) return false; if (ep->tx_credits >= ep->tx_credits_per_max_message) return false; - ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n", + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n", ep->eid); return true; } @@ -123,6 +127,7 @@ int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, struct sk_buff *skb) { + struct ath10k *ar = htc->ar; struct ath10k_htc_ep *ep = &htc->endpoint[eid]; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); struct ath10k_hif_sg_item sg_item; @@ -134,18 +139,10 @@ int ath10k_htc_send(struct ath10k_htc *htc, return -ECOMM; if (eid >= ATH10K_HTC_EP_COUNT) { - ath10k_warn("Invalid endpoint id: %d\n", eid); + ath10k_warn(ar, "Invalid endpoint id: %d\n", eid); return -ENOENT; } - /* FIXME: This looks ugly, can we fix it? 
*/ - spin_lock_bh(&htc->tx_lock); - if (htc->stopped) { - spin_unlock_bh(&htc->tx_lock); - return -ESHUTDOWN; - } - spin_unlock_bh(&htc->tx_lock); - skb_push(skb, sizeof(struct ath10k_htc_hdr)); if (ep->tx_credit_flow_enabled) { @@ -157,7 +154,7 @@ int ath10k_htc_send(struct ath10k_htc *htc, goto err_pull; } ep->tx_credits -= credits; - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d consumed %d credits (total %d)\n", eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); @@ -188,7 +185,7 @@ err_credits: if (ep->tx_credit_flow_enabled) { spin_lock_bh(&htc->tx_lock); ep->tx_credits += credits; - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d reverted %d credits back (total %d)\n", eid, credits, ep->tx_credits); spin_unlock_bh(&htc->tx_lock); @@ -227,11 +224,12 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc, int len, enum ath10k_htc_ep_id eid) { + struct ath10k *ar = htc->ar; struct ath10k_htc_ep *ep; int i, n_reports; if (len % sizeof(*report)) - ath10k_warn("Uneven credit report len %d", len); + ath10k_warn(ar, "Uneven credit report len %d", len); n_reports = len / sizeof(*report); @@ -243,7 +241,7 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc, ep = &htc->endpoint[report->eid]; ep->tx_credits += report->credits; - ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n", + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n", report->eid, report->credits, ep->tx_credits); if (ep->ep_ops.ep_tx_credits) { @@ -260,6 +258,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, int length, enum ath10k_htc_ep_id src_eid) { + struct ath10k *ar = htc->ar; int status = 0; struct ath10k_htc_record *record; u8 *orig_buffer; @@ -279,7 +278,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, if (record->hdr.len > length) { /* no room left in buffer for record */ - ath10k_warn("Invalid record length: %d\n", + ath10k_warn(ar, "Invalid record length: %d\n", record->hdr.len); status = -EINVAL; break; @@ -289,7 +288,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, case ATH10K_HTC_RECORD_CREDITS: len = sizeof(struct ath10k_htc_credit_report); if (record->hdr.len < len) { - ath10k_warn("Credit report too long\n"); + ath10k_warn(ar, "Credit report too long\n"); status = -EINVAL; break; } @@ -299,7 +298,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, src_eid); break; default: - ath10k_warn("Unhandled record: id:%d length:%d\n", + ath10k_warn(ar, "Unhandled record: id:%d length:%d\n", record->hdr.id, record->hdr.len); break; } @@ -313,7 +312,7 @@ static int ath10k_htc_process_trailer(struct ath10k_htc *htc, } if (status) - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "", + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "", orig_buffer, orig_length); return status; @@ -339,8 +338,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, eid = hdr->eid; if (eid >= ATH10K_HTC_EP_COUNT) { - ath10k_warn("HTC Rx: invalid eid %d\n", eid); - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "", + ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid); + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "", hdr, sizeof(*hdr)); status = -EINVAL; goto out; @@ -360,19 +359,19 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, payload_len = __le16_to_cpu(hdr->len); if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) { - ath10k_warn("HTC rx frame too long, len: %zu\n", + ath10k_warn(ar, "HTC rx frame too long, len: 
%zu\n", payload_len + sizeof(*hdr)); - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "", + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "", hdr, sizeof(*hdr)); status = -EINVAL; goto out; } if (skb->len < payload_len) { - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC Rx: insufficient length, got %d, expected %d\n", skb->len, payload_len); - ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", + ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "", hdr, sizeof(*hdr)); status = -EINVAL; goto out; @@ -388,7 +387,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, if ((trailer_len < min_len) || (trailer_len > payload_len)) { - ath10k_warn("Invalid trailer length: %d\n", + ath10k_warn(ar, "Invalid trailer length: %d\n", trailer_len); status = -EPROTO; goto out; @@ -421,7 +420,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, * this is a fatal error, target should not be * sending unsolicited messages on the ep 0 */ - ath10k_warn("HTC rx ctrl still processing\n"); + ath10k_warn(ar, "HTC rx ctrl still processing\n"); status = -EINVAL; complete(&htc->ctl_resp); goto out; @@ -442,7 +441,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar, goto out; } - ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", + ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n", eid, skb); ep->ep_ops.ep_rx_complete(ar, skb); @@ -459,7 +458,7 @@ static void ath10k_htc_control_rx_complete(struct ath10k *ar, { /* This is unexpected. FW is not supposed to send regular rx on this * endpoint. */ - ath10k_warn("unexpected htc rx\n"); + ath10k_warn(ar, "unexpected htc rx\n"); kfree_skb(skb); } @@ -546,6 +545,7 @@ static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc, int ath10k_htc_wait_target(struct ath10k_htc *htc) { + struct ath10k *ar = htc->ar; int i, status = 0; struct ath10k_htc_svc_conn_req conn_req; struct ath10k_htc_svc_conn_resp conn_resp; @@ -563,7 +563,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) * iomap writes unmasking PCI CE irqs aren't propagated * properly in KVM PCI-passthrough sometimes. */ - ath10k_warn("failed to receive control response completion, polling..\n"); + ath10k_warn(ar, "failed to receive control response completion, polling..\n"); for (i = 0; i < CE_COUNT; i++) ath10k_hif_send_complete_check(htc->ar, i, 1); @@ -576,12 +576,12 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) } if (status < 0) { - ath10k_err("ctl_resp never came in (%d)\n", status); + ath10k_err(ar, "ctl_resp never came in (%d)\n", status); return status; } if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) { - ath10k_err("Invalid HTC ready msg len:%d\n", + ath10k_err(ar, "Invalid HTC ready msg len:%d\n", htc->control_resp_len); return -ECOMM; } @@ -592,21 +592,21 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) credit_size = __le16_to_cpu(msg->ready.credit_size); if (message_id != ATH10K_HTC_MSG_READY_ID) { - ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id); + ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id); return -ECOMM; } htc->total_transmit_credits = credit_count; htc->target_credit_size = credit_size; - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "Target ready! 
transmit resources: %d size:%d\n", htc->total_transmit_credits, htc->target_credit_size); if ((htc->total_transmit_credits == 0) || (htc->target_credit_size == 0)) { - ath10k_err("Invalid credit size received\n"); + ath10k_err(ar, "Invalid credit size received\n"); return -ECOMM; } @@ -623,7 +623,8 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc) /* connect fake service */ status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp); if (status) { - ath10k_err("could not connect to htc service (%d)\n", status); + ath10k_err(ar, "could not connect to htc service (%d)\n", + status); return status; } @@ -634,6 +635,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, struct ath10k_htc_svc_conn_req *conn_req, struct ath10k_htc_svc_conn_resp *conn_resp) { + struct ath10k *ar = htc->ar; struct ath10k_htc_msg *msg; struct ath10k_htc_conn_svc *req_msg; struct ath10k_htc_conn_svc_response resp_msg_dummy; @@ -659,13 +661,13 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, tx_alloc = ath10k_htc_get_credit_allocation(htc, conn_req->service_id); if (!tx_alloc) - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot htc service %s does not allocate target credits\n", htc_service_name(conn_req->service_id)); skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); if (!skb) { - ath10k_err("Failed to allocate HTC packet\n"); + ath10k_err(ar, "Failed to allocate HTC packet\n"); return -ENOMEM; } @@ -703,7 +705,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, if (status <= 0) { if (status == 0) status = -ETIMEDOUT; - ath10k_err("Service connect timeout: %d\n", status); + ath10k_err(ar, "Service connect timeout: %d\n", status); return status; } @@ -716,11 +718,11 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) || (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->connect_service_response))) { - ath10k_err("Invalid resp message ID 0x%x", message_id); + ath10k_err(ar, "Invalid resp message ID 0x%x", message_id); return -EPROTO; } - ath10k_dbg(ATH10K_DBG_HTC, + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n", htc_service_name(service_id), resp_msg->status, resp_msg->eid); @@ -729,7 +731,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, /* check response status */ if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) { - ath10k_err("HTC Service %s connect request failed: 0x%x)\n", + ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n", htc_service_name(service_id), resp_msg->status); return -EPROTO; @@ -780,18 +782,18 @@ setup: if (status) return status; - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n", htc_service_name(ep->service_id), ep->ul_pipe_id, ep->dl_pipe_id, ep->eid); - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot htc ep %d ul polled %d dl polled %d\n", ep->eid, ep->ul_is_polled, ep->dl_is_polled); if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) { ep->tx_credit_flow_enabled = false; - ath10k_dbg(ATH10K_DBG_BOOT, + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot htc service '%s' eid %d TX flow control disabled\n", htc_service_name(ep->service_id), assigned_eid); } @@ -799,13 +801,13 @@ setup: return status; } -struct sk_buff *ath10k_htc_alloc_skb(int size) +struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size) { struct sk_buff *skb; skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr)); if (!skb) { - 
ath10k_warn("could not allocate HTC tx skb\n"); + ath10k_warn(ar, "could not allocate HTC tx skb\n"); return NULL; } @@ -813,13 +815,14 @@ struct sk_buff *ath10k_htc_alloc_skb(int size) /* FW/HTC requires 4-byte aligned streams */ if (!IS_ALIGNED((unsigned long)skb->data, 4)) - ath10k_warn("Unaligned HTC tx skb\n"); + ath10k_warn(ar, "Unaligned HTC tx skb\n"); return skb; } int ath10k_htc_start(struct ath10k_htc *htc) { + struct ath10k *ar = htc->ar; struct sk_buff *skb; int status = 0; struct ath10k_htc_msg *msg; @@ -835,7 +838,7 @@ int ath10k_htc_start(struct ath10k_htc *htc) msg->hdr.message_id = __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID); - ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n"); + ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n"); status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); if (status) { @@ -846,13 +849,6 @@ int ath10k_htc_start(struct ath10k_htc *htc) return 0; } -void ath10k_htc_stop(struct ath10k_htc *htc) -{ - spin_lock_bh(&htc->tx_lock); - htc->stopped = true; - spin_unlock_bh(&htc->tx_lock); -} - /* registered target arrival callback from the HIF layer */ int ath10k_htc_init(struct ath10k *ar) { @@ -862,7 +858,6 @@ int ath10k_htc_init(struct ath10k *ar) spin_lock_init(&htc->tx_lock); - htc->stopped = false; ath10k_htc_reset_endpoint_states(htc); /* setup HIF layer callbacks */ diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 4716d331e6b6..bf532f671189 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -332,7 +332,7 @@ struct ath10k_htc { struct ath10k *ar; struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT]; - /* protects endpoint and stopped fields */ + /* protects endpoints */ spinlock_t tx_lock; struct ath10k_htc_ops htc_ops; @@ -345,8 +345,6 @@ struct ath10k_htc { int total_transmit_credits; struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT]; int target_credit_size; - - bool stopped; }; int ath10k_htc_init(struct ath10k *ar); @@ -357,7 +355,6 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, struct ath10k_htc_svc_conn_resp *conn_resp); int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid, struct sk_buff *packet); -void ath10k_htc_stop(struct ath10k_htc *htc); -struct sk_buff *ath10k_htc_alloc_skb(int size); +struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size); #endif diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 19c12cc8d663..87daae11f116 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -74,12 +74,14 @@ int ath10k_htt_init(struct ath10k *ar) static int ath10k_htt_verify_version(struct ath10k_htt *htt) { - ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n", + struct ath10k *ar = htt->ar; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n", htt->target_version_major, htt->target_version_minor); if (htt->target_version_major != 2 && htt->target_version_major != 3) { - ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n", + ath10k_err(ar, "unsupported htt major version %d. 
supported versions are 2 and 3\n", htt->target_version_major); return -ENOTSUPP; } @@ -89,6 +91,7 @@ static int ath10k_htt_verify_version(struct ath10k_htt *htt) int ath10k_htt_setup(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; int status; init_completion(&htt->target_version_received); @@ -100,7 +103,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt) status = wait_for_completion_timeout(&htt->target_version_received, HTT_TARGET_VERSION_TIMEOUT_HZ); if (status <= 0) { - ath10k_warn("htt version request timed out\n"); + ath10k_warn(ar, "htt version request timed out\n"); return -ETIMEDOUT; } diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 80cdac15588a..30927b1d7109 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -271,13 +271,14 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; int idx; struct sk_buff *msdu; lockdep_assert_held(&htt->rx_ring.lock); if (htt->rx_ring.fill_cnt == 0) { - ath10k_warn("tried to pop sk_buff from an empty rx ring\n"); + ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n"); return NULL; } @@ -311,6 +312,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, struct sk_buff **tail_msdu, u32 *attention) { + struct ath10k *ar = htt->ar; int msdu_len, msdu_chaining = 0; struct sk_buff *msdu; struct htt_rx_desc *rx_desc; @@ -318,7 +320,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, lockdep_assert_held(&htt->rx_ring.lock); if (htt->rx_confused) { - ath10k_warn("htt is confused. refusing rx\n"); + ath10k_warn(ar, "htt is confused. refusing rx\n"); return -1; } @@ -331,7 +333,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, msdu->len + skb_tailroom(msdu), DMA_FROM_DEVICE); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ", msdu->data, msdu->len + skb_tailroom(msdu)); rx_desc = (struct htt_rx_desc *)msdu->data; @@ -354,7 +356,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, ath10k_htt_rx_free_msdu_chain(*head_msdu); *head_msdu = NULL; msdu = NULL; - ath10k_err("htt rx stopped. cannot recover\n"); + ath10k_err(ar, "htt rx stopped. 
cannot recover\n"); htt->rx_confused = true; break; } @@ -429,7 +431,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt, next->len + skb_tailroom(next), DMA_FROM_DEVICE); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx chained: ", next->data, next->len + skb_tailroom(next)); @@ -483,13 +485,14 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr) int ath10k_htt_rx_alloc(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; dma_addr_t paddr; void *vaddr; struct timer_list *timer = &htt->rx_ring.refill_retry_timer; htt->rx_ring.size = ath10k_htt_rx_ring_size(htt); if (!is_power_of_2(htt->rx_ring.size)) { - ath10k_warn("htt rx ring size is not power of 2\n"); + ath10k_warn(ar, "htt rx ring size is not power of 2\n"); return -EINVAL; } @@ -550,7 +553,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, (unsigned long)htt); - ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n", htt->rx_ring.size, htt->rx_ring.fill_level); return 0; @@ -572,7 +575,8 @@ err_netbuf: return -ENOMEM; } -static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type) +static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, + enum htt_rx_mpdu_encrypt_type type) { switch (type) { case HTT_RX_MPDU_ENCRYPT_WEP40: @@ -588,11 +592,12 @@ static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type) return 0; } - ath10k_warn("unknown encryption type %d\n", type); + ath10k_warn(ar, "unknown encryption type %d\n", type); return 0; } -static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type) +static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, + enum htt_rx_mpdu_encrypt_type type) { switch (type) { case HTT_RX_MPDU_ENCRYPT_NONE: @@ -608,7 +613,7 @@ static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type) return 8; } - ath10k_warn("unknown encryption type %d\n", type); + ath10k_warn(ar, "unknown encryption type %d\n", type); return 0; } @@ -819,19 +824,55 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar, return true; } +static const char * const tid_to_ac[] = { + "BE", + "BK", + "BK", + "BE", + "VI", + "VI", + "VO", + "VO", +}; + +static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size) +{ + u8 *qc; + int tid; + + if (!ieee80211_is_data_qos(hdr->frame_control)) + return ""; + + qc = ieee80211_get_qos_ctl(hdr); + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + if (tid < 8) + snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]); + else + snprintf(out, size, "tid %d", tid); + + return out; +} + static void ath10k_process_rx(struct ath10k *ar, struct ieee80211_rx_status *rx_status, struct sk_buff *skb) { struct ieee80211_rx_status *status; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + char tid[32]; status = IEEE80211_SKB_RXCB(skb); *status = *rx_status; - ath10k_dbg(ATH10K_DBG_DATA, - "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n", + ath10k_dbg(ar, ATH10K_DBG_DATA, + "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", skb, skb->len, + ieee80211_get_SA(hdr), + ath10k_get_tid(hdr, tid, sizeof(tid)), + is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? 
+ "mcast" : "ucast", + (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, status->flag == 0 ? "legacy" : "", status->flag & RX_FLAG_HT ? "ht" : "", status->flag & RX_FLAG_VHT ? "vht" : "", @@ -843,8 +884,9 @@ static void ath10k_process_rx(struct ath10k *ar, status->freq, status->band, status->flag, !!(status->flag & RX_FLAG_FAILED_FCS_CRC), - !!(status->flag & RX_FLAG_MMIC_ERROR)); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", + !!(status->flag & RX_FLAG_MMIC_ERROR), + !!(status->flag & RX_FLAG_AMSDU_MORE)); + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ", skb->data, skb->len); ieee80211_rx(ar->hw, skb); @@ -860,13 +902,14 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, struct ieee80211_rx_status *rx_status, struct sk_buff *skb_in) { + struct ath10k *ar = htt->ar; struct htt_rx_desc *rxd; struct sk_buff *skb = skb_in; struct sk_buff *first; enum rx_msdu_decap_format fmt; enum htt_rx_mpdu_encrypt_type enctype; struct ieee80211_hdr *hdr; - u8 hdr_buf[64], addr[ETH_ALEN], *qos; + u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos; unsigned int hdr_len; rxd = (void *)skb->data - sizeof(*rxd); @@ -893,8 +936,8 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, /* First frame in an A-MSDU chain has more decapped data. */ if (skb == first) { len = round_up(ieee80211_hdrlen(hdr->frame_control), 4); - len += round_up(ath10k_htt_rx_crypto_param_len(enctype), - 4); + len += round_up(ath10k_htt_rx_crypto_param_len(ar, + enctype), 4); decap_hdr += len; } @@ -904,10 +947,11 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, skb_trim(skb, skb->len - FCS_LEN); break; case RX_MSDU_DECAP_NATIVE_WIFI: - /* pull decapped header and copy DA */ + /* pull decapped header and copy SA & DA */ hdr = (struct ieee80211_hdr *)skb->data; hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr); - memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN); + memcpy(da, ieee80211_get_DA(hdr), ETH_ALEN); + memcpy(sa, ieee80211_get_SA(hdr), ETH_ALEN); skb_pull(skb, hdr_len); /* push original 802.11 header */ @@ -921,8 +965,11 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt, qos = ieee80211_get_qos_ctl(hdr); qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; - /* original 802.11 header has a different DA */ - memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN); + /* original 802.11 header has a different DA and in + * case of 4addr it may also have different SA + */ + memcpy(ieee80211_get_DA(hdr), da, ETH_ALEN); + memcpy(ieee80211_get_SA(hdr), sa, ETH_ALEN); break; case RX_MSDU_DECAP_ETHERNET2_DIX: /* strip ethernet header and insert decapped 802.11 @@ -965,6 +1012,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct ieee80211_rx_status *rx_status, struct sk_buff *skb) { + struct ath10k *ar = htt->ar; struct htt_rx_desc *rxd; struct ieee80211_hdr *hdr; enum rx_msdu_decap_format fmt; @@ -974,7 +1022,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, /* This shouldn't happen. If it does than it may be a FW bug. 
*/ if (skb->next) { - ath10k_warn("htt rx received chained non A-MSDU frame\n"); + ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n"); ath10k_htt_rx_free_msdu_chain(skb->next); skb->next = NULL; } @@ -1011,7 +1059,8 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, rfc1042 = hdr; rfc1042 += roundup(hdr_len, 4); - rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4); + rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar, + enctype), 4); skb_pull(skb, sizeof(struct ethhdr)); memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)), @@ -1120,27 +1169,29 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt, bool channel_set, u32 attention) { + struct ath10k *ar = htt->ar; + if (head->len == 0) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx dropping due to zero-len\n"); return false; } if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx dropping due to decrypt-err\n"); return false; } if (!channel_set) { - ath10k_warn("no channel configured; ignoring frame!\n"); + ath10k_warn(ar, "no channel configured; ignoring frame!\n"); return false; } /* Skip mgmt frames while we handle this in WMI */ if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL || attention & RX_ATTENTION_FLAGS_MGMT_TYPE) { - ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n"); return false; } @@ -1148,14 +1199,14 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt, status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR && status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER && !htt->ar->monitor_started) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx ignoring frame w/ status %d\n", status); return false; } if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx CAC running\n"); return false; } @@ -1166,6 +1217,7 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt, static void ath10k_htt_rx_handler(struct ath10k_htt *htt, struct htt_rx_indication *rx) { + struct ath10k *ar = htt->ar; struct ieee80211_rx_status *rx_status = &htt->rx_status; struct htt_rx_indication_mpdu_range *mpdu_ranges; struct htt_rx_desc *rxd; @@ -1211,7 +1263,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, rx_status); } - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ", rx, sizeof(*rx) + (sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges)); @@ -1233,7 +1285,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, &attention); if (ret < 0) { - ath10k_warn("failed to pop amsdu from htt rx ring %d\n", + ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n", ret); ath10k_htt_rx_free_msdu_chain(msdu_head); continue; @@ -1282,6 +1334,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt, static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, struct htt_rx_fragment_indication *frag) { + struct ath10k *ar = htt->ar; struct sk_buff *msdu_head, *msdu_tail; enum htt_rx_mpdu_encrypt_type enctype; struct htt_rx_desc *rxd; @@ -1308,10 +1361,10 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, &attention); spin_unlock_bh(&htt->rx_ring.lock); - ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); + ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n"); if (ret) { - ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n", + 
ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n", ret); ath10k_htt_rx_free_msdu_chain(msdu_head); return; @@ -1328,7 +1381,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, RX_MSDU_START_INFO1_DECAP_FORMAT); if (fmt != RX_MSDU_DECAP_RAW) { - ath10k_warn("we dont support non-raw fragmented rx yet\n"); + ath10k_warn(ar, "we dont support non-raw fragmented rx yet\n"); dev_kfree_skb_any(msdu_head); goto end; } @@ -1340,17 +1393,17 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head); if (tkip_mic_err) - ath10k_warn("tkip mic error\n"); + ath10k_warn(ar, "tkip mic error\n"); if (decrypt_err) { - ath10k_warn("decryption err in fragmented rx\n"); + ath10k_warn(ar, "decryption err in fragmented rx\n"); dev_kfree_skb_any(msdu_head); goto end; } if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) { hdrlen = ieee80211_hdrlen(hdr->frame_control); - paramlen = ath10k_htt_rx_crypto_param_len(enctype); + paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype); /* It is more efficient to move the header than the payload */ memmove((void *)msdu_head->data + paramlen, @@ -1364,7 +1417,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, trim = 4; /* remove crypto trailer */ - trim += ath10k_htt_rx_crypto_tail_len(enctype); + trim += ath10k_htt_rx_crypto_tail_len(ar, enctype); /* last fragment of TKIP frags has MIC */ if (!ieee80211_has_morefrags(hdr->frame_control) && @@ -1372,20 +1425,20 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt, trim += 8; if (trim > msdu_head->len) { - ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n"); + ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? 
drop\n"); dev_kfree_skb_any(msdu_head); goto end; } skb_trim(msdu_head, msdu_head->len - trim); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ", msdu_head->data, msdu_head->len); ath10k_process_rx(htt->ar, rx_status, msdu_head); end: if (fw_desc_len > 0) { - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "expecting more fragmented rx in one indication %d\n", fw_desc_len); } @@ -1415,12 +1468,12 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar, tx_done.discard = true; break; default: - ath10k_warn("unhandled tx completion status %d\n", status); + ath10k_warn(ar, "unhandled tx completion status %d\n", status); tx_done.discard = true; break; } - ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n", resp->data_tx_completion.num_msdus); for (i = 0; i < resp->data_tx_completion.num_msdus; i++) { @@ -1441,14 +1494,14 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) tid = MS(info0, HTT_RX_BA_INFO0_TID); peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx addba tid %hu peer_id %hu size %hhu\n", tid, peer_id, ev->window_size); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); if (!peer) { - ath10k_warn("received addba event for invalid peer_id: %hu\n", + ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", peer_id); spin_unlock_bh(&ar->data_lock); return; @@ -1456,13 +1509,13 @@ static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) arvif = ath10k_get_arvif(ar, peer->vdev_id); if (!arvif) { - ath10k_warn("received addba event for invalid vdev_id: %u\n", + ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", peer->vdev_id); spin_unlock_bh(&ar->data_lock); return; } - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx start rx ba session sta %pM tid %hu size %hhu\n", peer->addr, tid, ev->window_size); @@ -1481,14 +1534,14 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) tid = MS(info0, HTT_RX_BA_INFO0_TID); peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID); - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx delba tid %hu peer_id %hu\n", tid, peer_id); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); if (!peer) { - ath10k_warn("received addba event for invalid peer_id: %hu\n", + ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n", peer_id); spin_unlock_bh(&ar->data_lock); return; @@ -1496,13 +1549,13 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) arvif = ath10k_get_arvif(ar, peer->vdev_id); if (!arvif) { - ath10k_warn("received addba event for invalid vdev_id: %u\n", + ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n", peer->vdev_id); spin_unlock_bh(&ar->data_lock); return; } - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx stop rx ba session sta %pM tid %hu\n", peer->addr, tid); @@ -1517,9 +1570,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) /* confirm alignment */ if (!IS_ALIGNED((unsigned long)skb->data, 4)) - ath10k_warn("unaligned htt message, expect trouble\n"); + ath10k_warn(ar, "unaligned htt message, expect trouble\n"); - ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n", 
resp->hdr.msg_type); switch (resp->hdr.msg_type) { case HTT_T2H_MSG_TYPE_VERSION_CONF: { @@ -1583,7 +1636,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) struct ath10k *ar = htt->ar; struct htt_security_indication *ev = &resp->security_indication; - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "sec ind peer_id %d unicast %d type %d\n", __le16_to_cpu(ev->peer_id), !!(ev->flags & HTT_SECURITY_IS_UNICAST), @@ -1592,7 +1645,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_FRAG_IND: { - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind); break; @@ -1609,7 +1662,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) * sends all tx frames as already inspected so this shouldn't * happen unless fw has a bug. */ - ath10k_warn("received an unexpected htt tx inspect event\n"); + ath10k_warn(ar, "received an unexpected htt tx inspect event\n"); break; case HTT_T2H_MSG_TYPE_RX_ADDBA: ath10k_htt_rx_addba(ar, resp); @@ -1624,9 +1677,9 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } default: - ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n", resp->hdr.msg_type); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ", skb->data, skb->len); break; }; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 8b27bfcc1de3..eaa73aa99c20 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -58,6 +58,7 @@ exit: int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; int msdu_id; lockdep_assert_held(&htt->tx_lock); @@ -67,24 +68,29 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt) if (msdu_id == htt->max_num_pending_tx) return -ENOBUFS; - ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id); + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id); __set_bit(msdu_id, htt->used_msdu_ids); return msdu_id; } void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) { + struct ath10k *ar = htt->ar; + lockdep_assert_held(&htt->tx_lock); if (!test_bit(msdu_id, htt->used_msdu_ids)) - ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id); + ath10k_warn(ar, "trying to free unallocated msdu_id %d\n", + msdu_id); - ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); __clear_bit(msdu_id, htt->used_msdu_ids); } int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; + spin_lock_init(&htt->tx_lock); init_waitqueue_head(&htt->empty_tx_wq); @@ -93,7 +99,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) else htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC; - ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * @@ -122,6 +128,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; struct htt_tx_done tx_done = {0}; int msdu_id; @@ -130,7 +137,7 @@ 
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt) if (!test_bit(msdu_id, htt->used_msdu_ids)) continue; - ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); tx_done.discard = 1; @@ -157,6 +164,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; int len = 0; @@ -165,7 +173,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) len += sizeof(cmd->hdr); len += sizeof(cmd->ver_req); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -184,6 +192,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) { + struct ath10k *ar = htt->ar; struct htt_stats_req *req; struct sk_buff *skb; struct htt_cmd *cmd; @@ -192,7 +201,7 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) len += sizeof(cmd->hdr); len += sizeof(cmd->stats_req); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -214,7 +223,8 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { - ath10k_warn("failed to send htt type stats request: %d", ret); + ath10k_warn(ar, "failed to send htt type stats request: %d", + ret); dev_kfree_skb_any(skb); return ret; } @@ -224,6 +234,7 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) { + struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; struct htt_rx_ring_setup_ring *ring; @@ -242,7 +253,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr) + (sizeof(*ring) * num_rx_ring); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -311,6 +322,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu) { + struct ath10k *ar = htt->ar; struct htt_aggr_conf *aggr_conf; struct sk_buff *skb; struct htt_cmd *cmd; @@ -328,7 +340,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, len = sizeof(cmd->hdr); len += sizeof(cmd->aggr_conf); - skb = ath10k_htc_alloc_skb(len); + skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -340,7 +352,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; - ath10k_dbg(ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", aggr_conf->max_num_amsdu_subframes, aggr_conf->max_num_ampdu_subframes); @@ -355,7 +367,8 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { - struct device *dev = htt->ar->dev; + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); @@ -382,7 +395,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) htt->pending_tx[msdu_id] = msdu; spin_unlock_bh(&htt->tx_lock); - txdesc = ath10k_htc_alloc_skb(len); + txdesc = ath10k_htc_alloc_skb(ar, len); if 
(!txdesc) { res = -ENOMEM; goto err_free_msdu_id; @@ -429,7 +442,8 @@ err: int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { - struct device *dev = htt->ar->dev; + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; @@ -545,11 +559,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID); - ath10k_dbg(ATH10K_DBG_HTT, + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n", flags0, flags1, msdu->len, msdu_id, frags_paddr, (u32)skb_cb->paddr, vdev_id, tid); - ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", + ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); sg_items[0].transfer_id = 0; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 007e855f4ba9..13568b01de9f 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -28,16 +28,19 @@ #define QCA988X_HW_2_0_CHIP_ID_REV 0x2 #define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0" #define QCA988X_HW_2_0_FW_FILE "firmware.bin" -#define QCA988X_HW_2_0_FW_2_FILE "firmware-2.bin" +#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin" #define QCA988X_HW_2_0_OTP_FILE "otp.bin" #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 #define ATH10K_FW_API2_FILE "firmware-2.bin" +#define ATH10K_FW_API3_FILE "firmware-3.bin" /* includes also the null byte */ #define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K" +#define REG_DUMP_COUNT_QCA988X 60 + struct ath10k_fw_ie { __le32 id; __le32 len; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 9d61bb157189..b858c8288196 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -36,6 +36,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif, enum set_key_cmd cmd, const u8 *macaddr) { + struct ath10k *ar = arvif->ar; struct wmi_vdev_install_key_arg arg = { .vdev_id = arvif->vdev_id, .key_idx = key->keyidx, @@ -73,7 +74,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif, arg.key_flags = WMI_KEY_PAIRWISE; break; default: - ath10k_warn("cipher %d is not supported\n", key->cipher); + ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); return -EOPNOTSUPP; } @@ -168,7 +169,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif, first_errno = ret; if (ret) - ath10k_warn("failed to remove peer wep key %d: %d\n", + ath10k_warn(ar, "failed to remove peer wep key %d: %d\n", i, ret); peer->keys[i] = NULL; @@ -216,7 +217,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, first_errno = ret; if (ret) - ath10k_warn("failed to remove key for %pM: %d\n", + ath10k_warn(ar, "failed to remove key for %pM: %d\n", addr, ret); } @@ -327,14 +328,14 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr) ret = ath10k_wmi_peer_create(ar, vdev_id, addr); if (ret) { - ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n", + ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", addr, vdev_id, ret); return ret; } ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); if (ret) { - ath10k_warn("failed to wait for 
created wmi peer %pM on vdev %i: %i\n", + ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", addr, vdev_id, ret); return ret; } @@ -355,7 +356,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) ret = ath10k_wmi_pdev_set_param(ar, param, ATH10K_KICKOUT_THRESHOLD); if (ret) { - ath10k_warn("failed to set kickout threshold on vdev %i: %d\n", + ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -364,7 +365,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, ATH10K_KEEPALIVE_MIN_IDLE); if (ret) { - ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n", + ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -373,7 +374,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, ATH10K_KEEPALIVE_MAX_IDLE); if (ret) { - ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n", + ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -382,7 +383,7 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); if (ret) { - ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n", + ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -449,7 +450,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) if (peer->vdev_id != vdev_id) continue; - ath10k_warn("removing stale peer %pM from vdev_id %d\n", + ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", peer->addr, vdev_id); list_del(&peer->list); @@ -496,7 +497,7 @@ static bool ath10k_monitor_is_enabled(struct ath10k *ar) { lockdep_assert_held(&ar->conf_mutex); - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor refs: promisc %d monitor %d cac %d\n", ar->promisc, ar->monitor, test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)); @@ -531,35 +532,35 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id) ret = ath10k_wmi_vdev_start(ar, &arg); if (ret) { - ath10k_warn("failed to request monitor vdev %i start: %d\n", + ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n", vdev_id, ret); return ret; } ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i: %d\n", vdev_id, ret); return ret; } ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); if (ret) { - ath10k_warn("failed to put up monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n", vdev_id, ret); goto vdev_stop; } ar->monitor_vdev_id = vdev_id; - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n", ar->monitor_vdev_id); return 0; vdev_stop: ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) - ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n", + ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n", ar->monitor_vdev_id, ret); return ret; @@ -573,20 +574,20 @@ static int ath10k_monitor_vdev_stop(struct ath10k *ar) ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id); if (ret) - 
ath10k_warn("failed to put down monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n", ar->monitor_vdev_id, ret); ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id); if (ret) - ath10k_warn("failed to to request monitor vdev %i stop: %d\n", + ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n", ar->monitor_vdev_id, ret); ret = ath10k_vdev_setup_sync(ar); if (ret) - ath10k_warn("failed to synchronise monitor vdev %i: %d\n", + ath10k_warn(ar, "failed to synchronise monitor vdev %i: %d\n", ar->monitor_vdev_id, ret); - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n", ar->monitor_vdev_id); return ret; } @@ -597,35 +598,29 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - bit = ffs(ar->free_vdev_map); - if (bit == 0) { - ath10k_warn("failed to find free vdev id for monitor vdev\n"); + if (ar->free_vdev_map == 0) { + ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n"); return -ENOMEM; } + bit = ffs(ar->free_vdev_map); + ar->monitor_vdev_id = bit - 1; - ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, WMI_VDEV_TYPE_MONITOR, 0, ar->mac_addr); if (ret) { - ath10k_warn("failed to request monitor vdev %i creation: %d\n", + ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", ar->monitor_vdev_id, ret); - goto vdev_fail; + return ret; } - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n", + ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", ar->monitor_vdev_id); return 0; - -vdev_fail: - /* - * Restore the ID to the global map. - */ - ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); - return ret; } static int ath10k_monitor_vdev_delete(struct ath10k *ar) @@ -636,14 +631,14 @@ static int ath10k_monitor_vdev_delete(struct ath10k *ar) ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); if (ret) { - ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n", + ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", ar->monitor_vdev_id, ret); return ret; } - ar->free_vdev_map |= 1 << (ar->monitor_vdev_id); + ar->free_vdev_map |= 1 << ar->monitor_vdev_id; - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", ar->monitor_vdev_id); return ret; } @@ -655,30 +650,30 @@ static int ath10k_monitor_start(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); if (!ath10k_monitor_is_enabled(ar)) { - ath10k_warn("trying to start monitor with no references\n"); + ath10k_warn(ar, "trying to start monitor with no references\n"); return 0; } if (ar->monitor_started) { - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor already started\n"); return 0; } ret = ath10k_monitor_vdev_create(ar); if (ret) { - ath10k_warn("failed to create monitor vdev: %d\n", ret); + ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); return ret; } ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); if (ret) { - ath10k_warn("failed to start monitor vdev: %d\n", ret); + ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); ath10k_monitor_vdev_delete(ar); return ret; } ar->monitor_started = true; - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); return 0; } @@ -690,27 +685,27 @@ static 
void ath10k_monitor_stop(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); if (ath10k_monitor_is_enabled(ar)) { - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor will be stopped later\n"); return; } if (!ar->monitor_started) { - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor probably failed to start earlier\n"); return; } ret = ath10k_monitor_vdev_stop(ar); if (ret) - ath10k_warn("failed to stop monitor vdev: %d\n", ret); + ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); ret = ath10k_monitor_vdev_delete(ar); if (ret) - ath10k_warn("failed to delete monitor vdev: %d\n", ret); + ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); ar->monitor_started = false; - ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); } static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) @@ -743,12 +738,12 @@ static int ath10k_start_cac(struct ath10k *ar) ret = ath10k_monitor_start(ar); if (ret) { - ath10k_warn("failed to start monitor (cac): %d\n", ret); + ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); return ret; } - ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", ar->monitor_vdev_id); return 0; @@ -765,7 +760,7 @@ static int ath10k_stop_cac(struct ath10k *ar) clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); ath10k_monitor_stop(ar); - ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); return 0; } @@ -791,12 +786,12 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar) * radiation is not allowed, make this channel DFS_UNAVAILABLE * by indicating that radar was detected. 
*/ - ath10k_warn("failed to start CAC: %d\n", ret); + ath10k_warn(ar, "failed to start CAC: %d\n", ret); ieee80211_radar_detected(ar->hw); } } -static int ath10k_vdev_start(struct ath10k_vif *arvif) +static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart) { struct ath10k *ar = arvif->ar; struct cfg80211_chan_def *chandef = &ar->chandef; @@ -833,21 +828,25 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif) arg.ssid_len = arvif->vif->bss_conf.ssid_len; } - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d start center_freq %d phymode %s\n", arg.vdev_id, arg.channel.freq, ath10k_wmi_phymode_str(arg.channel.mode)); - ret = ath10k_wmi_vdev_start(ar, &arg); + if (restart) + ret = ath10k_wmi_vdev_restart(ar, &arg); + else + ret = ath10k_wmi_vdev_start(ar, &arg); + if (ret) { - ath10k_warn("failed to start WMI vdev %i: %d\n", + ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", arg.vdev_id, ret); return ret; } ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn("failed to synchronise setup for vdev %i: %d\n", + ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n", arg.vdev_id, ret); return ret; } @@ -858,6 +857,16 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif) return ret; } +static int ath10k_vdev_start(struct ath10k_vif *arvif) +{ + return ath10k_vdev_start_restart(arvif, false); +} + +static int ath10k_vdev_restart(struct ath10k_vif *arvif) +{ + return ath10k_vdev_start_restart(arvif, true); +} + static int ath10k_vdev_stop(struct ath10k_vif *arvif) { struct ath10k *ar = arvif->ar; @@ -869,14 +878,14 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif) ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); if (ret) { - ath10k_warn("failed to stop WMI vdev %i: %d\n", + ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", arvif->vdev_id, ret); return ret; } ret = ath10k_vdev_setup_sync(ar); if (ret) { - ath10k_warn("failed to syncronise setup for vdev %i: %d\n", + ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -894,6 +903,7 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif) static void ath10k_control_beaconing(struct ath10k_vif *arvif, struct ieee80211_bss_conf *info) { + struct ath10k *ar = arvif->ar; int ret = 0; lockdep_assert_held(&arvif->ar->conf_mutex); @@ -931,7 +941,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif, ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid); if (ret) { - ath10k_warn("failed to bring up vdev %d: %i\n", + ath10k_warn(ar, "failed to bring up vdev %d: %i\n", arvif->vdev_id, ret); ath10k_vdev_stop(arvif); return; @@ -940,13 +950,14 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif, arvif->is_started = true; arvif->is_up = true; - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); } static void ath10k_control_ibss(struct ath10k_vif *arvif, struct ieee80211_bss_conf *info, const u8 self_peer[ETH_ALEN]) { + struct ath10k *ar = arvif->ar; u32 vdev_param; int ret = 0; @@ -955,7 +966,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, if (!info->ibss_joined) { ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer); if (ret) - ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n", + ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n", self_peer, arvif->vdev_id, ret); if (is_zero_ether_addr(arvif->bssid)) @@ -964,7 +975,7 @@ static void 
ath10k_control_ibss(struct ath10k_vif *arvif, ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, arvif->bssid); if (ret) { - ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n", + ath10k_warn(ar, "failed to delete IBSS BSSID peer %pM for vdev %d: %d\n", arvif->bssid, arvif->vdev_id, ret); return; } @@ -976,7 +987,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer); if (ret) { - ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n", + ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n", self_peer, arvif->vdev_id, ret); return; } @@ -985,7 +996,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif, ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, ATH10K_DEFAULT_ATIM); if (ret) - ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n", + ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", arvif->vdev_id, ret); } @@ -1012,7 +1023,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, conf->dynamic_ps_timeout); if (ret) { - ath10k_warn("failed to set inactivity time for vdev %d: %i\n", + ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", arvif->vdev_id, ret); return ret; } @@ -1020,12 +1031,12 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) psmode = WMI_STA_PS_MODE_DISABLED; } - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", arvif->vdev_id, psmode ? "enable" : "disable"); ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); if (ret) { - ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n", + ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", psmode, arvif->vdev_id, ret); return ret; } @@ -1109,12 +1120,12 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, /* FIXME: base on RSN IE/WPA IE is a correct idea? 
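The ath10k_vdev_start_restart() change a few hunks back is a small but useful consolidation: the argument marshalling for the WMI start and restart commands is identical, so one helper builds the request and a bool selects the command, with two trivial wrappers keeping the old call sites readable. Stripped to its skeleton (argument setup and logging elided; the request type name follows the driver's wmi naming and is an assumption here):

static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
{
	struct ath10k *ar = arvif->ar;
	struct wmi_vdev_start_request_arg arg = {};
	int ret;

	/* ... fill arg from arvif and ar->chandef as in the hunk above ... */

	if (restart)
		ret = ath10k_wmi_vdev_restart(ar, &arg);
	else
		ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret)
		return ret;

	/* both commands complete asynchronously; wait for the event */
	return ath10k_vdev_setup_sync(ar);
}

static int ath10k_vdev_start(struct ath10k_vif *arvif)
{
	return ath10k_vdev_start_restart(arvif, false);
}

static int ath10k_vdev_restart(struct ath10k_vif *arvif)
{
	return ath10k_vdev_start_restart(arvif, true);
}

The restart flavour pays off later in ath10k_config_chan(), where running vdevs are brought down and restarted on the new channel instead of being fully stopped and recreated.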
*/ if (rsnie || wpaie) { - ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); + ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; } if (wpaie) { - ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); + ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; } } @@ -1223,7 +1234,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar, arg->peer_num_spatial_streams = sta->rx_nss; } - ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", arg->addr, arg->peer_ht_rates.num_rates, arg->peer_num_spatial_streams); @@ -1240,7 +1251,7 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, lockdep_assert_held(&ar->conf_mutex); if (sta->wme && sta->uapsd_queues) { - ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", sta->uapsd_queues, sta->max_sp); if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) @@ -1265,7 +1276,7 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, WMI_AP_PS_PEER_PARAM_UAPSD, uapsd); if (ret) { - ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n", + ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -1275,7 +1286,7 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, WMI_AP_PS_PEER_PARAM_MAX_SP, max_sp); if (ret) { - ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n", + ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -1287,7 +1298,7 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10); if (ret) { - ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n", + ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -1334,7 +1345,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar, arg->peer_vht_rates.tx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map); - ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", sta->addr, arg->peer_max_mpdu, arg->peer_flags); } @@ -1407,7 +1418,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, break; } - ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", sta->addr, ath10k_wmi_phymode_str(phymode)); arg->peer_phymode = phymode; @@ -1480,7 +1491,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!ap_sta) { - ath10k_warn("failed to find station entry for bss %pM vdev %i\n", + ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", bss_conf->bssid, arvif->vdev_id); rcu_read_unlock(); return; @@ -1493,7 +1504,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta, bss_conf, &peer_arg); if (ret) { - ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n", + ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); rcu_read_unlock(); return; @@ -1503,19 +1514,19 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, ret = 
ath10k_wmi_peer_assoc(ar, &peer_arg); if (ret) { - ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n", + ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", bss_conf->bssid, arvif->vdev_id, ret); return; } ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); if (ret) { - ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n", + ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", arvif->vdev_id, ret); return; } - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up (associated) bssid %pM aid %d\n", arvif->vdev_id, bss_conf->bssid, bss_conf->aid); @@ -1524,7 +1535,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw, ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); if (ret) { - ath10k_warn("failed to set vdev %d up: %d\n", + ath10k_warn(ar, "failed to set vdev %d up: %d\n", arvif->vdev_id, ret); return; } @@ -1550,7 +1561,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, * No idea why this happens, even though VDEV-DOWN is supposed * to be analogous to link down, so just stop the VDEV. */ - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated)\n", arvif->vdev_id); /* FIXME: check return value */ @@ -1563,7 +1574,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw, * interfaces as it expects there is no rx when no interface is * running. */ - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id); /* FIXME: why don't we print error if wmi call fails? */ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); @@ -1584,7 +1595,7 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg); if (ret) { - ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n", + ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); return ret; } @@ -1592,14 +1603,14 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, peer_arg.peer_reassoc = reassoc; ret = ath10k_wmi_peer_assoc(ar, &peer_arg); if (ret) { - ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n", + ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", sta->addr, arvif->vdev_id, ret); return ret; } ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap); if (ret) { - ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n", + ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } @@ -1608,7 +1619,7 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, arvif->num_legacy_stations++; ret = ath10k_recalc_rtscts_prot(arvif); if (ret) { - ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n", + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } @@ -1616,14 +1627,14 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif, ret = ath10k_install_peer_wep_keys(arvif, sta->addr); if (ret) { - ath10k_warn("failed to install peer wep keys for vdev %i: %d\n", + ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); if (ret) { - ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n", + ath10k_warn(ar, "failed to set
qos params for STA %pM for vdev %i: %d\n", sta->addr, arvif->vdev_id, ret); return ret; } @@ -1642,7 +1653,7 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, arvif->num_legacy_stations--; ret = ath10k_recalc_rtscts_prot(arvif); if (ret) { - ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n", + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); return ret; } @@ -1650,7 +1661,7 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif, ret = ath10k_clear_peer_keys(arvif, sta->addr); if (ret) { - ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n", + ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", arvif->vdev_id, ret); return ret; } @@ -1742,7 +1753,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) continue; - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", ch - arg.channels, arg.n_channels, ch->freq, ch->max_power, ch->max_reg_power, @@ -1785,7 +1796,7 @@ static void ath10k_regd_update(struct ath10k *ar) ret = ath10k_update_channel_list(ar); if (ret) - ath10k_warn("failed to update channel list: %d\n", ret); + ath10k_warn(ar, "failed to update channel list: %d\n", ret); regpair = ar->ath_common.regulatory.regpair; @@ -1806,7 +1817,7 @@ static void ath10k_regd_update(struct ath10k *ar) regpair->reg_5ghz_ctl, wmi_dfs_reg); if (ret) - ath10k_warn("failed to set pdev regdomain: %d\n", ret); + ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); } static void ath10k_reg_notifier(struct wiphy *wiphy, @@ -1819,12 +1830,12 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { - ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", request->dfs_region); result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, request->dfs_region); if (!result) - ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n", + ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", request->dfs_region); } @@ -1861,7 +1872,7 @@ static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, if (ar->monitor_started) return ar->monitor_vdev_id; - ath10k_warn("failed to resolve vdev id\n"); + ath10k_warn(ar, "failed to resolve vdev id\n"); return 0; } @@ -1897,6 +1908,7 @@ static void ath10k_tx_wep_key_work(struct work_struct *work) { struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, wep_key_work); + struct ath10k *ar = arvif->ar; int ret, keyidx = arvif->def_wep_key_newidx; mutex_lock(&arvif->ar->conf_mutex); @@ -1907,7 +1919,7 @@ static void ath10k_tx_wep_key_work(struct work_struct *work) if (arvif->def_wep_key_idx == keyidx) goto unlock; - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", arvif->vdev_id, keyidx); ret = ath10k_wmi_vdev_set_param(arvif->ar, @@ -1915,7 +1927,7 @@ static void ath10k_tx_wep_key_work(struct work_struct *work) arvif->ar->wmi.vdev_param->def_keyid, keyidx); if (ret) { - ath10k_warn("failed to update wep key index for vdev %d: %d\n", + ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", arvif->vdev_id, ret); goto unlock; @@ -1995,7 +2007,7 @@ static 
void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) ar->fw_features)) { if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >= ATH10K_MAX_NUM_MGMT_PENDING) { - ath10k_warn("reached WMI management tranmist queue limit\n"); + ath10k_warn(ar, "reached WMI management transmit queue limit\n"); ret = -EBUSY; goto exit; } @@ -2019,7 +2031,8 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb) exit: if (ret) { - ath10k_warn("failed to transmit packet, dropping: %d\n", ret); + ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", + ret); ieee80211_free_txskb(ar->hw, skb); } } @@ -2061,7 +2074,7 @@ void ath10k_offchan_tx_work(struct work_struct *work) mutex_lock(&ar->conf_mutex); - ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n", skb); hdr = (struct ieee80211_hdr *)skb->data; @@ -2074,13 +2087,13 @@ void ath10k_offchan_tx_work(struct work_struct *work) if (peer) /* FIXME: should this use ath10k_warn()? */ - ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", peer_addr, vdev_id); if (!peer) { ret = ath10k_peer_create(ar, vdev_id, peer_addr); if (ret) - ath10k_warn("failed to create peer %pM on vdev %d: %d\n", + ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", peer_addr, vdev_id, ret); } @@ -2094,13 +2107,13 @@ void ath10k_offchan_tx_work(struct work_struct *work) ret = wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); if (ret <= 0) - ath10k_warn("timed out waiting for offchannel skb %p\n", + ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", skb); if (!peer) { ret = ath10k_peer_delete(ar, vdev_id, peer_addr); if (ret) - ath10k_warn("failed to delete peer %pM on vdev %d: %d\n", + ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", peer_addr, vdev_id, ret); } @@ -2134,7 +2147,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) ret = ath10k_wmi_mgmt_tx(ar, skb); if (ret) { - ath10k_warn("failed to transmit management frame via WMI: %d\n", + ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", ret); ieee80211_free_txskb(ar->hw, skb); } @@ -2145,34 +2158,40 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) /* Scanning */ /************/ -/* - * This gets called if we dont get a heart-beat during scan. 
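The transmit hunks above also show the shape of the management-over-WMI path: frames are appended to a queue whose length is capped at ATH10K_MAX_NUM_MGMT_PENDING, and ath10k_mgmt_over_wmi_tx_work() drains it, so a stalled firmware cannot pin unbounded skb memory. A sketch of the producer side, assuming the queue field seen in the hunk (the work-item field name wmi_mgmt_tx_work is a guess):

/* Sketch: bounded producer side of the WMI management-TX path. */
static int example_mgmt_tx_enqueue(struct ath10k *ar, struct sk_buff *skb)
{
	if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
	    ATH10K_MAX_NUM_MGMT_PENDING)
		return -EBUSY;	/* caller drops the frame */

	skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
	return 0;
}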
- * This may indicate the FW has hung and we need to abort the - * scan manually to prevent cancel_hw_scan() from deadlocking - */ -void ath10k_reset_scan(unsigned long ptr) +void __ath10k_scan_finish(struct ath10k *ar) { - struct ath10k *ar = (struct ath10k *)ptr; + lockdep_assert_held(&ar->data_lock); - spin_lock_bh(&ar->data_lock); - if (!ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); - return; + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + if (ar->scan.is_roc) + ieee80211_remain_on_channel_expired(ar->hw); + else + ieee80211_scan_completed(ar->hw, + (ar->scan.state == + ATH10K_SCAN_ABORTING)); + /* fall through */ + case ATH10K_SCAN_STARTING: + ar->scan.state = ATH10K_SCAN_IDLE; + ar->scan_channel = NULL; + ath10k_offchan_tx_purge(ar); + cancel_delayed_work(&ar->scan.timeout); + complete_all(&ar->scan.completed); + break; } +} - ath10k_warn("scan timed out, firmware problem?\n"); - - if (ar->scan.is_roc) - ieee80211_remain_on_channel_expired(ar->hw); - else - ieee80211_scan_completed(ar->hw, 1 /* aborted */); - - ar->scan.in_progress = false; - complete_all(&ar->scan.completed); +void ath10k_scan_finish(struct ath10k *ar) +{ + spin_lock_bh(&ar->data_lock); + __ath10k_scan_finish(ar); spin_unlock_bh(&ar->data_lock); } -static int ath10k_abort_scan(struct ath10k *ar) +static int ath10k_scan_stop(struct ath10k *ar) { struct wmi_stop_scan_arg arg = { .req_id = 1, /* FIXME */ @@ -2183,47 +2202,79 @@ static int ath10k_abort_scan(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - del_timer_sync(&ar->scan.timeout); + ret = ath10k_wmi_stop_scan(ar, &arg); + if (ret) { + ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); + goto out; + } - spin_lock_bh(&ar->data_lock); - if (!ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); - return 0; + ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); + if (ret == 0) { + ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); + ret = -ETIMEDOUT; + } else if (ret > 0) { + ret = 0; } - ar->scan.aborting = true; +out: + /* Scan state should be updated upon scan completion but in case + * firmware fails to deliver the event (for whatever reason) it is + * desired to clean up scan state anyway. Firmware may have just + * dropped the scan completion event delivery due to transport pipe + * being overflown with data and/or it can recover on its own before + * next scan request is submitted. + */ + spin_lock_bh(&ar->data_lock); + if (ar->scan.state != ATH10K_SCAN_IDLE) + __ath10k_scan_finish(ar); spin_unlock_bh(&ar->data_lock); - ret = ath10k_wmi_stop_scan(ar, &arg); - if (ret) { - ath10k_warn("failed to stop wmi scan: %d\n", ret); - spin_lock_bh(&ar->data_lock); - ar->scan.in_progress = false; - ath10k_offchan_tx_purge(ar); - spin_unlock_bh(&ar->data_lock); - return -EIO; - } + return ret; +} - ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); - if (ret == 0) - ath10k_warn("timed out while waiting for scan to stop\n"); +static void ath10k_scan_abort(struct ath10k *ar) +{ + int ret; - /* scan completion may be done right after we timeout here, so let's - * check the in_progress and tell mac80211 scan is completed. 
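The scan rework starting here replaces the in_progress/aborting boolean pair with an explicit four-state machine, which is what lets __ath10k_scan_finish() below be called safely from the completion, abort and timeout paths alike. The state values are visible in the switch statements; a plausible definition of the enum, and of the ath10k_scan_state_str() helper that the abort path prints with (its body is not part of this diff), would be:

enum ath10k_scan_state {
	ATH10K_SCAN_IDLE,
	ATH10K_SCAN_STARTING,
	ATH10K_SCAN_RUNNING,
	ATH10K_SCAN_ABORTING,
};

/* Assumed implementation; the in-tree helper lives elsewhere. */
static const char *ath10k_scan_state_str(enum ath10k_scan_state state)
{
	switch (state) {
	case ATH10K_SCAN_IDLE:
		return "idle";
	case ATH10K_SCAN_STARTING:
		return "starting";
	case ATH10K_SCAN_RUNNING:
		return "running";
	case ATH10K_SCAN_ABORTING:
		return "aborting";
	}
	return "unknown";
}

Note the deliberate fall-through in __ath10k_scan_finish(): RUNNING and ABORTING first notify mac80211 (scan completed, or remain-on-channel expired), then share the STARTING cleanup that resets the state to IDLE and completes the waiters.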
if we - * don't do that and FW fails to send us scan completion indication - * then userspace won't be able to scan anymore */ - ret = 0; + lockdep_assert_held(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - ath10k_warn("failed to stop scan, it's still in progress\n"); - ar->scan.in_progress = false; - ath10k_offchan_tx_purge(ar); - ret = -ETIMEDOUT; + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + /* This can happen if timeout worker kicked in and called + * abortion while scan completion was being processed. + */ + break; + case ATH10K_SCAN_STARTING: + case ATH10K_SCAN_ABORTING: + ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + ar->scan.state = ATH10K_SCAN_ABORTING; + spin_unlock_bh(&ar->data_lock); + + ret = ath10k_scan_stop(ar); + if (ret) + ath10k_warn(ar, "failed to abort scan: %d\n", ret); + + spin_lock_bh(&ar->data_lock); + break; } + spin_unlock_bh(&ar->data_lock); +} - return ret; +void ath10k_scan_timeout_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, + scan.timeout.work); + + mutex_lock(&ar->conf_mutex); + ath10k_scan_abort(ar); + mutex_unlock(&ar->conf_mutex); } static int ath10k_start_scan(struct ath10k *ar, @@ -2239,17 +2290,16 @@ static int ath10k_start_scan(struct ath10k *ar, ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); if (ret == 0) { - ath10k_abort_scan(ar); - return ret; + ret = ath10k_scan_stop(ar); + if (ret) + ath10k_warn(ar, "failed to stop scan: %d\n", ret); + + return -ETIMEDOUT; } - /* the scan can complete earlier, before we even - * start the timer. in that case the timer handler - * checks ar->scan.in_progress and bails out if its - * false. Add a 200ms margin to account event/command - * processing. 
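The other half of the rework swaps the self-armed timer (ath10k_reset_scan() via mod_timer()) for a delayed work item, for a locking reason: the new timeout handler takes conf_mutex and issues a sleeping WMI stop command, neither of which a timer callback running in atomic context may do. The arming side and the worker, both taken from the hunks here:

/* Arm the scan watchdog; 200 ms of slack covers event/command latency. */
ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
			     msecs_to_jiffies(arg->max_scan_time + 200));

/* The handler can now sleep and reuse the regular abort path. */
void ath10k_scan_timeout_work(struct work_struct *work)
{
	struct ath10k *ar = container_of(work, struct ath10k,
					 scan.timeout.work);

	mutex_lock(&ar->conf_mutex);
	ath10k_scan_abort(ar);
	mutex_unlock(&ar->conf_mutex);
}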
*/ - mod_timer(&ar->scan.timeout, jiffies + - msecs_to_jiffies(arg->max_scan_time+200)); + /* Add a 200ms margin to account for event/command processing */ + ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, + msecs_to_jiffies(arg->max_scan_time+200)); return 0; } @@ -2269,7 +2319,7 @@ static void ath10k_tx(struct ieee80211_hw *hw, /* We should disable CCK RATE due to P2P */ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) - ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); ATH10K_SKB_CB(skb)->htt.is_offchan = false; ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr); @@ -2289,7 +2339,8 @@ static void ath10k_tx(struct ieee80211_hw *hw, ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; spin_unlock_bh(&ar->data_lock); - ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb); + ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", + skb); skb_queue_tail(&ar->offchan_tx_queue, skb); ieee80211_queue_work(hw, &ar->offchan_tx_work); @@ -2325,8 +2376,7 @@ void ath10k_halt(struct ath10k *ar) ath10k_monitor_stop(ar); } - del_timer_sync(&ar->scan.timeout); - ath10k_reset_scan((unsigned long)ar); + ath10k_scan_finish(ar); ath10k_peer_cleanup_all(ar); ath10k_core_stop(ar); ath10k_hif_power_down(ar); @@ -2380,7 +2430,7 @@ static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, tx_ant); if (ret) { - ath10k_warn("failed to set tx-chainmask: %d, req 0x%x\n", + ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", ret, tx_ant); return ret; } @@ -2388,7 +2438,7 @@ static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, rx_ant); if (ret) { - ath10k_warn("failed to set rx-chainmask: %d, req 0x%x\n", + ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", ret, rx_ant); return ret; } @@ -2439,25 +2489,25 @@ static int ath10k_start(struct ieee80211_hw *hw) ret = ath10k_hif_power_up(ar); if (ret) { - ath10k_err("Could not init hif: %d\n", ret); + ath10k_err(ar, "Could not init hif: %d\n", ret); goto err_off; } ret = ath10k_core_start(ar); if (ret) { - ath10k_err("Could not init core: %d\n", ret); + ath10k_err(ar, "Could not init core: %d\n", ret); goto err_power_down; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); if (ret) { - ath10k_warn("failed to enable PMF QOS: %d\n", ret); + ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); goto err_core_stop; } ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); if (ret) { - ath10k_warn("failed to enable dynamic BW: %d\n", ret); + ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); goto err_core_stop; } @@ -2477,7 +2527,7 @@ static int ath10k_start(struct ieee80211_hw *hw) ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->arp_ac_override, 0); if (ret) { - ath10k_warn("failed to set arp ac override parameter: %d\n", + ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", ret); goto err_core_stop; } @@ -2485,6 +2535,8 @@ static int ath10k_start(struct ieee80211_hw *hw) ar->num_started_vdevs = 0; ath10k_regd_update(ar); + ath10k_spectral_start(ar); + mutex_unlock(&ar->conf_mutex); return 0; @@ -2515,6 +2567,7 @@ static void ath10k_stop(struct ieee80211_hw *hw) } mutex_unlock(&ar->conf_mutex); + cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->restart_work); } @@ -2528,7 +2581,7 @@ static int 
ath10k_config_ps(struct ath10k *ar) list_for_each_entry(arvif, &ar->arvifs, list) { ret = ath10k_mac_vif_setup_ps(arvif); if (ret) { - ath10k_warn("failed to setup powersave: %d\n", ret); + ath10k_warn(ar, "failed to setup powersave: %d\n", ret); break; } } @@ -2566,7 +2619,7 @@ static void ath10k_config_chan(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n", ar->chandef.chan->center_freq, ar->chandef.center_freq1, @@ -2582,18 +2635,21 @@ static void ath10k_config_chan(struct ath10k *ar) if (!arvif->is_started) continue; + if (!arvif->is_up) + continue; + if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) continue; - ret = ath10k_vdev_stop(arvif); + ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); if (ret) { - ath10k_warn("failed to stop vdev %d: %d\n", + ath10k_warn(ar, "failed to down vdev %d: %d\n", arvif->vdev_id, ret); continue; } } - /* all vdevs are now stopped - now attempt to restart them */ + /* all vdevs are downed now - attempt to restart and re-up them */ list_for_each_entry(arvif, &ar->arvifs, list) { if (!arvif->is_started) @@ -2602,9 +2658,9 @@ static void ath10k_config_chan(struct ath10k *ar) if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) continue; - ret = ath10k_vdev_start(arvif); + ret = ath10k_vdev_restart(arvif); if (ret) { - ath10k_warn("failed to start vdev %d: %d\n", + ath10k_warn(ar, "failed to restart vdev %d: %d\n", arvif->vdev_id, ret); continue; } @@ -2615,7 +2671,7 @@ static void ath10k_config_chan(struct ath10k *ar) ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid); if (ret) { - ath10k_warn("failed to bring vdev up %d: %d\n", + ath10k_warn(ar, "failed to bring vdev up %d: %d\n", arvif->vdev_id, ret); continue; } @@ -2635,7 +2691,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&ar->conf_mutex); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config channel %dMHz flags 0x%x radar %d\n", conf->chandef.chan->center_freq, conf->chandef.chan->flags, @@ -2655,21 +2711,21 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) } if (changed & IEEE80211_CONF_CHANGE_POWER) { - ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n", hw->conf.power_level); param = ar->wmi.pdev_param->txpower_limit2g; ret = ath10k_wmi_pdev_set_param(ar, param, hw->conf.power_level * 2); if (ret) - ath10k_warn("failed to set 2g txpower %d: %d\n", + ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", hw->conf.power_level, ret); param = ar->wmi.pdev_param->txpower_limit5g; ret = ath10k_wmi_pdev_set_param(ar, param, hw->conf.power_level * 2); if (ret) - ath10k_warn("failed to set 5g txpower %d: %d\n", + ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", hw->conf.power_level, ret); } @@ -2681,7 +2737,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) ar->monitor = true; ret = ath10k_monitor_start(ar); if (ret) { - ath10k_warn("failed to start monitor (config): %d\n", + ath10k_warn(ar, "failed to start monitor (config): %d\n", ret); ar->monitor = false; } @@ -2724,11 +2780,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work); INIT_LIST_HEAD(&arvif->list); - bit = ffs(ar->free_vdev_map); - if (bit == 0) { + if (ar->free_vdev_map == 0) { + ath10k_warn(ar, "Free vdev map is empty, no more interfaces 
allowed.\n"); ret = -EBUSY; goto err; } + bit = ffs(ar->free_vdev_map); arvif->vdev_id = bit - 1; arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; @@ -2760,25 +2817,25 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, break; } - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n", arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype); ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, vif->addr); if (ret) { - ath10k_warn("failed to create WMI vdev %i: %d\n", + ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", arvif->vdev_id, ret); goto err; } - ar->free_vdev_map &= ~BIT(arvif->vdev_id); + ar->free_vdev_map &= ~(1 << arvif->vdev_id); list_add(&arvif->list, &ar->arvifs); vdev_param = ar->wmi.vdev_param->def_keyid; ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param, arvif->def_wep_key_idx); if (ret) { - ath10k_warn("failed to set vdev %i default key id: %d\n", + ath10k_warn(ar, "failed to set vdev %i default key id: %d\n", arvif->vdev_id, ret); goto err_vdev_delete; } @@ -2788,7 +2845,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ATH10K_HW_TXRX_NATIVE_WIFI); /* 10.X firmware does not support this VDEV parameter. Do not warn */ if (ret && ret != -EOPNOTSUPP) { - ath10k_warn("failed to set vdev %i TX encapsulation: %d\n", + ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", arvif->vdev_id, ret); goto err_vdev_delete; } @@ -2796,14 +2853,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr); if (ret) { - ath10k_warn("failed to create vdev %i peer for AP: %d\n", + ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n", arvif->vdev_id, ret); goto err_vdev_delete; } ret = ath10k_mac_set_kickout(arvif); if (ret) { - ath10k_warn("failed to set vdev %i kickout parameters: %d\n", + ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2815,7 +2872,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); if (ret) { - ath10k_warn("failed to set vdev %i RX wake policy: %d\n", + ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2825,7 +2882,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); if (ret) { - ath10k_warn("failed to set vdev %i TX wake thresh: %d\n", + ath10k_warn(ar, "failed to set vdev %i TX wake thresh: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2835,7 +2892,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); if (ret) { - ath10k_warn("failed to set vdev %i PSPOLL count: %d\n", + ath10k_warn(ar, "failed to set vdev %i PSPOLL count: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2843,14 +2900,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); if (ret) { - ath10k_warn("failed to set rts threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); if (ret) { - ath10k_warn("failed to set frag 
threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n", arvif->vdev_id, ret); goto err_peer_delete; } @@ -2864,7 +2921,7 @@ err_peer_delete: err_vdev_delete: ath10k_wmi_vdev_delete(ar, arvif->vdev_id); - ar->free_vdev_map &= ~BIT(arvif->vdev_id); + ar->free_vdev_map |= 1 << arvif->vdev_id; list_del(&arvif->list); err: @@ -2892,26 +2949,32 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, dev_kfree_skb_any(arvif->beacon); arvif->beacon = NULL; } + spin_unlock_bh(&ar->data_lock); - ar->free_vdev_map |= 1 << (arvif->vdev_id); + ret = ath10k_spectral_vif_stop(arvif); + if (ret) + ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", + arvif->vdev_id, ret); + + ar->free_vdev_map |= 1 << arvif->vdev_id; list_del(&arvif->list); if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr); if (ret) - ath10k_warn("failed to remove peer for AP vdev %i: %d\n", + ath10k_warn(ar, "failed to remove peer for AP vdev %i: %d\n", arvif->vdev_id, ret); kfree(arvif->u.ap.noa_data); } - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", arvif->vdev_id); ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); if (ret) - ath10k_warn("failed to delete WMI vdev %i: %d\n", + ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", arvif->vdev_id, ret); ath10k_peer_cleanup(ar, arvif->vdev_id); @@ -2950,7 +3013,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw, ar->promisc = true; ret = ath10k_monitor_start(ar); if (ret) { - ath10k_warn("failed to start monitor (promisc): %d\n", + ath10k_warn(ar, "failed to start monitor (promisc): %d\n", ret); ar->promisc = false; } @@ -2982,17 +3045,17 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, vdev_param = ar->wmi.vdev_param->beacon_interval; ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, arvif->beacon_interval); - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d beacon_interval %d\n", arvif->vdev_id, arvif->beacon_interval); if (ret) - ath10k_warn("failed to set beacon interval for vdev %d: %i\n", + ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_BEACON) { - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "vdev %d set beacon tx mode to staggered\n", arvif->vdev_id); @@ -3000,14 +3063,14 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, ret = ath10k_wmi_pdev_set_param(ar, pdev_param, WMI_BEACON_STAGGERED_MODE); if (ret) - ath10k_warn("failed to set beacon mode for vdev %d: %i\n", + ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", arvif->vdev_id, ret); } if (changed & BSS_CHANGED_BEACON_INFO) { arvif->dtim_period = info->dtim_period; - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d dtim_period %d\n", arvif->vdev_id, arvif->dtim_period); @@ -3015,7 +3078,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, arvif->dtim_period); if (ret) - ath10k_warn("failed to set dtim period for vdev %d: %i\n", + ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", arvif->vdev_id, ret); } @@ -3034,14 +3097,14 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID && vif->type != NL80211_IFTYPE_AP) { if (!is_zero_ether_addr(info->bssid)) { - ath10k_dbg(ATH10K_DBG_MAC, + 
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d create peer %pM\n", arvif->vdev_id, info->bssid); ret = ath10k_peer_create(ar, arvif->vdev_id, info->bssid); if (ret) - ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n", + ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n", info->bssid, arvif->vdev_id, ret); if (vif->type == NL80211_IFTYPE_STATION) { @@ -3051,13 +3114,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, */ memcpy(arvif->bssid, info->bssid, ETH_ALEN); - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d start %pM\n", arvif->vdev_id, info->bssid); ret = ath10k_vdev_start(arvif); if (ret) { - ath10k_warn("failed to start vdev %i: %d\n", + ath10k_warn(ar, "failed to start vdev %i: %d\n", arvif->vdev_id, ret); goto exit; } @@ -3081,12 +3144,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ERP_CTS_PROT) { arvif->use_cts_prot = info->use_cts_prot; - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", arvif->vdev_id, info->use_cts_prot); ret = ath10k_recalc_rtscts_prot(arvif); if (ret) - ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n", + ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", arvif->vdev_id, ret); } @@ -3098,14 +3161,14 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, else slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", arvif->vdev_id, slottime); vdev_param = ar->wmi.vdev_param->slot_time; ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, slottime); if (ret) - ath10k_warn("failed to set erp slot for vdev %d: %i\n", + ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", arvif->vdev_id, ret); } @@ -3116,7 +3179,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, else preamble = WMI_VDEV_PREAMBLE_LONG; - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d preamble %dn", arvif->vdev_id, preamble); @@ -3124,7 +3187,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, preamble); if (ret) - ath10k_warn("failed to set preamble for vdev %d: %i\n", + ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", arvif->vdev_id, ret); } @@ -3151,20 +3214,26 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + reinit_completion(&ar->scan.started); + reinit_completion(&ar->scan.completed); + ar->scan.state = ATH10K_SCAN_STARTING; + ar->scan.is_roc = false; + ar->scan.vdev_id = arvif->vdev_id; + ret = 0; + break; + case ATH10K_SCAN_STARTING: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: ret = -EBUSY; - goto exit; + break; } - - reinit_completion(&ar->scan.started); - reinit_completion(&ar->scan.completed); - ar->scan.in_progress = true; - ar->scan.aborting = false; - ar->scan.is_roc = false; - ar->scan.vdev_id = arvif->vdev_id; spin_unlock_bh(&ar->data_lock); + if (ret) + goto exit; + memset(&arg, 0, sizeof(arg)); ath10k_wmi_start_scan_init(ar, &arg); arg.vdev_id = arvif->vdev_id; @@ -3196,9 +3265,9 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, ret = ath10k_start_scan(ar, &arg); if (ret) { - 
ath10k_warn("failed to start hw scan: %d\n", ret); + ath10k_warn(ar, "failed to start hw scan: %d\n", ret); spin_lock_bh(&ar->data_lock); - ar->scan.in_progress = false; + ar->scan.state = ATH10K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); } @@ -3211,14 +3280,10 @@ static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath10k *ar = hw->priv; - int ret; mutex_lock(&ar->conf_mutex); - ret = ath10k_abort_scan(ar); - if (ret) { - ath10k_warn("failed to abort scan: %d\n", ret); - ieee80211_scan_completed(hw, 1 /* aborted */); - } + cancel_delayed_work_sync(&ar->scan.timeout); + ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); } @@ -3256,7 +3321,7 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, key->keyidx); if (ret) - ath10k_warn("failed to set vdev %i group key as default key: %d\n", + ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", arvif->vdev_id, ret); } @@ -3294,7 +3359,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, if (!peer) { if (cmd == SET_KEY) { - ath10k_warn("failed to install key for non-existent peer %pM\n", + ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", peer_addr); ret = -EOPNOTSUPP; goto exit; @@ -3317,7 +3382,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = ath10k_install_key(arvif, key, cmd, peer_addr); if (ret) { - ath10k_warn("failed to install key for vdev %i peer %pM: %d\n", + ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", arvif->vdev_id, peer_addr, ret); goto exit; } @@ -3332,7 +3397,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, peer->keys[key->keyidx] = NULL; else if (peer == NULL) /* impossible unless FW goes crazy */ - ath10k_warn("Peer %pM disappeared!\n", peer_addr); + ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); spin_unlock_bh(&ar->data_lock); exit: @@ -3368,45 +3433,45 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) mutex_lock(&ar->conf_mutex); if (changed & IEEE80211_RC_BW_CHANGED) { - ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", sta->addr, bw); err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, WMI_PEER_CHAN_WIDTH, bw); if (err) - ath10k_warn("failed to update STA %pM peer bw %d: %d\n", + ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", sta->addr, bw, err); } if (changed & IEEE80211_RC_NSS_CHANGED) { - ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", sta->addr, nss); err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, WMI_PEER_NSS, nss); if (err) - ath10k_warn("failed to update STA %pM nss %d: %d\n", + ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", sta->addr, nss, err); } if (changed & IEEE80211_RC_SMPS_CHANGED) { - ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", sta->addr, smps); err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, WMI_PEER_SMPS_STATE, smps); if (err) - ath10k_warn("failed to update STA %pM smps %d: %d\n", + ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", sta->addr, smps, err); } if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { - ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM 
supp rates\n", sta->addr); err = ath10k_station_assoc(ar, arvif, sta, true); if (err) - ath10k_warn("failed to reassociate station: %pM\n", + ath10k_warn(ar, "failed to reassociate station: %pM\n", sta->addr); } @@ -3451,31 +3516,31 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, max_num_peers = TARGET_NUM_PEERS; if (ar->num_peers >= max_num_peers) { - ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n", + ath10k_warn(ar, "number of peers exceeded: peers number %d (max peers %d)\n", ar->num_peers, max_num_peers); ret = -ENOBUFS; goto exit; } - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d peer create %pM (new sta) num_peers %d\n", arvif->vdev_id, sta->addr, ar->num_peers); ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr); if (ret) - ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n", + ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", sta->addr, arvif->vdev_id, ret); } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { /* * Existing station deletion. */ - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d peer delete %pM (sta gone)\n", arvif->vdev_id, sta->addr); ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) - ath10k_warn("failed to delete peer %pM for vdev %d: %i\n", + ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", sta->addr, arvif->vdev_id, ret); if (vif->type == NL80211_IFTYPE_STATION) @@ -3487,12 +3552,12 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, /* * New association. */ - ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", sta->addr); ret = ath10k_station_assoc(ar, arvif, sta, false); if (ret) - ath10k_warn("failed to associate station %pM for vdev %i: %i\n", + ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH && @@ -3501,12 +3566,12 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, /* * Disassociation. 
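The ath10k_sta_rc_update_wk()/ath10k_sta_rc_update() pair above shows the driver's standard split for rate-control updates: mac80211 calls sta_rc_update in atomic context, so the callback merely records what changed under data_lock and kicks a work item, and the worker later performs the sleeping WMI peer-param calls under conf_mutex. A sketch of the atomic half, with assumed per-station field names (the changed accumulator and update_wk work item are not spelled out in these hunks):

static void example_sta_rc_update(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta, u32 changed)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;

	spin_lock_bh(&ar->data_lock);
	arsta->changed |= changed;	/* assumed accumulator field */
	/* snapshots of sta->bandwidth/rx_nss/smps_mode would be taken here */
	spin_unlock_bh(&ar->data_lock);

	ieee80211_queue_work(hw, &arsta->update_wk);	/* assumed field */
}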
*/ - ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", sta->addr); ret = ath10k_station_disassoc(ar, arvif, sta); if (ret) - ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n", + ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", sta->addr, arvif->vdev_id, ret); } exit: @@ -3554,7 +3619,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, WMI_STA_PS_PARAM_UAPSD, arvif->u.sta.uapsd); if (ret) { - ath10k_warn("failed to set uapsd params: %d\n", ret); + ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); goto exit; } @@ -3567,7 +3632,7 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, WMI_STA_PS_PARAM_RX_WAKE_POLICY, value); if (ret) - ath10k_warn("failed to set rx wake param: %d\n", ret); + ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); exit: return ret; @@ -3617,13 +3682,13 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw, /* FIXME: FW accepts wmm params per hw, not per vif */ ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params); if (ret) { - ath10k_warn("failed to set wmm params: %d\n", ret); + ath10k_warn(ar, "failed to set wmm params: %d\n", ret); goto exit; } ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); if (ret) - ath10k_warn("failed to set sta uapsd: %d\n", ret); + ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); exit: mutex_unlock(&ar->conf_mutex); @@ -3641,27 +3706,33 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); struct wmi_start_scan_arg arg; - int ret; + int ret = 0; mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); - if (ar->scan.in_progress) { - spin_unlock_bh(&ar->data_lock); + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + reinit_completion(&ar->scan.started); + reinit_completion(&ar->scan.completed); + reinit_completion(&ar->scan.on_channel); + ar->scan.state = ATH10K_SCAN_STARTING; + ar->scan.is_roc = true; + ar->scan.vdev_id = arvif->vdev_id; + ar->scan.roc_freq = chan->center_freq; + ret = 0; + break; + case ATH10K_SCAN_STARTING: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: ret = -EBUSY; - goto exit; + break; } - - reinit_completion(&ar->scan.started); - reinit_completion(&ar->scan.completed); - reinit_completion(&ar->scan.on_channel); - ar->scan.in_progress = true; - ar->scan.aborting = false; - ar->scan.is_roc = true; - ar->scan.vdev_id = arvif->vdev_id; - ar->scan.roc_freq = chan->center_freq; spin_unlock_bh(&ar->data_lock); + if (ret) + goto exit; + memset(&arg, 0, sizeof(arg)); ath10k_wmi_start_scan_init(ar, &arg); arg.vdev_id = arvif->vdev_id; @@ -3676,17 +3747,21 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, ret = ath10k_start_scan(ar, &arg); if (ret) { - ath10k_warn("failed to start roc scan: %d\n", ret); + ath10k_warn(ar, "failed to start roc scan: %d\n", ret); spin_lock_bh(&ar->data_lock); - ar->scan.in_progress = false; + ar->scan.state = ATH10K_SCAN_IDLE; spin_unlock_bh(&ar->data_lock); goto exit; } ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); if (ret == 0) { - ath10k_warn("failed to switch to channel for roc scan\n"); - ath10k_abort_scan(ar); + ath10k_warn(ar, "failed to switch to channel for roc scan\n"); + + ret = ath10k_scan_stop(ar); + if (ret) + ath10k_warn(ar, "failed to stop scan: %d\n", ret); + ret = -ETIMEDOUT; goto exit; } @@ -3702,7 +3777,8 @@ static int 
ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) struct ath10k *ar = hw->priv; mutex_lock(&ar->conf_mutex); - ath10k_abort_scan(ar); + cancel_delayed_work_sync(&ar->scan.timeout); + ath10k_scan_abort(ar); mutex_unlock(&ar->conf_mutex); return 0; @@ -3721,12 +3797,12 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) mutex_lock(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", arvif->vdev_id, value); ret = ath10k_mac_set_rts(arvif, value); if (ret) { - ath10k_warn("failed to set rts threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", arvif->vdev_id, ret); break; } @@ -3744,12 +3820,12 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value) mutex_lock(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { - ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n", arvif->vdev_id, value); ret = ath10k_mac_set_rts(arvif, value); if (ret) { - ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n", + ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n", arvif->vdev_id, ret); break; } @@ -3789,7 +3865,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, }), ATH10K_FLUSH_TIMEOUT_HZ); if (ret <= 0 || skip) - ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n", + ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n", skip, ar->state, ret); skip: @@ -3824,7 +3900,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw, ret = ath10k_hif_suspend(ar); if (ret) { - ath10k_warn("failed to suspend hif: %d\n", ret); + ath10k_warn(ar, "failed to suspend hif: %d\n", ret); goto resume; } @@ -3833,7 +3909,7 @@ static int ath10k_suspend(struct ieee80211_hw *hw, resume: ret = ath10k_wmi_pdev_resume_target(ar); if (ret) - ath10k_warn("failed to resume target: %d\n", ret); + ath10k_warn(ar, "failed to resume target: %d\n", ret); ret = 1; exit: @@ -3850,14 +3926,14 @@ static int ath10k_resume(struct ieee80211_hw *hw) ret = ath10k_hif_resume(ar); if (ret) { - ath10k_warn("failed to resume hif: %d\n", ret); + ath10k_warn(ar, "failed to resume hif: %d\n", ret); ret = 1; goto exit; } ret = ath10k_wmi_pdev_resume_target(ar); if (ret) { - ath10k_warn("failed to resume target: %d\n", ret); + ath10k_warn(ar, "failed to resume target: %d\n", ret); ret = 1; goto exit; } @@ -3878,7 +3954,7 @@ static void ath10k_restart_complete(struct ieee80211_hw *hw) /* If device failed to restart it will be in a different state, e.g. 
* ATH10K_STATE_WEDGED */ if (ar->state == ATH10K_STATE_RESTARTED) { - ath10k_info("device successfully recovered\n"); + ath10k_info(ar, "device successfully recovered\n"); ar->state = ATH10K_STATE_ON; } @@ -4075,7 +4151,8 @@ ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask, } static bool -ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask, +ath10k_bitrate_mask_rate(struct ath10k *ar, + const struct cfg80211_bitrate_mask *mask, enum ieee80211_band band, u8 *fixed_rate, u8 *fixed_nss) @@ -4133,7 +4210,7 @@ ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask, nss <<= 4; pream <<= 6; - ath10k_dbg(ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n", pream, nss, rate); *fixed_rate = pream | nss | rate; @@ -4141,7 +4218,8 @@ ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask, return true; } -static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask, +static bool ath10k_get_fixed_rate_nss(struct ath10k *ar, + const struct cfg80211_bitrate_mask *mask, enum ieee80211_band band, u8 *fixed_rate, u8 *fixed_nss) @@ -4151,7 +4229,7 @@ static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask, return true; /* Next Check single rate is set */ - return ath10k_bitrate_mask_rate(mask, band, fixed_rate, fixed_nss); + return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss); } static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, @@ -4171,16 +4249,16 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, goto exit; if (fixed_rate == WMI_FIXED_RATE_NONE) - ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n"); if (force_sgi) - ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n"); + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n"); vdev_param = ar->wmi.vdev_param->fixed_rate; ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, fixed_rate); if (ret) { - ath10k_warn("failed to set fixed rate param 0x%02x: %d\n", + ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", fixed_rate, ret); ret = -EINVAL; goto exit; @@ -4193,7 +4271,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, vdev_param, fixed_nss); if (ret) { - ath10k_warn("failed to set fixed nss param %d: %d\n", + ath10k_warn(ar, "failed to set fixed nss param %d: %d\n", fixed_nss, ret); ret = -EINVAL; goto exit; @@ -4206,7 +4284,7 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif, force_sgi); if (ret) { - ath10k_warn("failed to set sgi param %d: %d\n", + ath10k_warn(ar, "failed to set sgi param %d: %d\n", force_sgi, ret); ret = -EINVAL; goto exit; @@ -4235,14 +4313,14 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw, return -EINVAL; if (!ath10k_default_bitrate_mask(ar, band, mask)) { - if (!ath10k_get_fixed_rate_nss(mask, band, + if (!ath10k_get_fixed_rate_nss(ar, mask, band, &fixed_rate, &fixed_nss)) return -EINVAL; } if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) { - ath10k_warn("failed to force SGI usage for default rate settings\n"); + ath10k_warn(ar, "failed to force SGI usage for default rate settings\n"); return -EINVAL; } @@ -4261,7 +4339,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, spin_lock_bh(&ar->data_lock); - ath10k_dbg(ATH10K_DBG_MAC, + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 
sta->addr, changed, sta->bandwidth, sta->rx_nss, sta->smps_mode); @@ -4280,7 +4358,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, bw = WMI_PEER_CHWIDTH_80MHZ; break; case IEEE80211_STA_RX_BW_160: - ath10k_warn("Invalid bandwith %d in rc update for %pM\n", + ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", sta->bandwidth, sta->addr); bw = WMI_PEER_CHWIDTH_20MHZ; break; @@ -4307,7 +4385,7 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw, smps = WMI_PEER_SMPS_DYNAMIC; break; case IEEE80211_SMPS_NUM_MODES: - ath10k_warn("Invalid smps %d in sta rc update for %pM\n", + ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", sta->smps_mode, sta->addr); smps = WMI_PEER_SMPS_PS_NONE; break; @@ -4339,9 +4417,10 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size) { + struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - ath10k_dbg(ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", arvif->vdev_id, sta->addr, tid, action); switch (action) { @@ -4489,12 +4568,12 @@ static struct ieee80211_rate ath10k_rates[] = { #define ath10k_g_rates (ath10k_rates + 0) #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) -struct ath10k *ath10k_mac_create(void) +struct ath10k *ath10k_mac_create(size_t priv_size) { struct ieee80211_hw *hw; struct ath10k *ar; - hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops); + hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops); if (!hw) return NULL; @@ -4669,7 +4748,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) ath10k_get_arvif_iter, &arvif_iter); if (!arvif_iter.arvif) { - ath10k_warn("No VIF found for vdev %d\n", vdev_id); + ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); return NULL; } @@ -4815,19 +4894,19 @@ int ath10k_mac_register(struct ath10k *ar) NL80211_DFS_UNSET); if (!ar->dfs_detector) - ath10k_warn("failed to initialise DFS pattern detector\n"); + ath10k_warn(ar, "failed to initialise DFS pattern detector\n"); } ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, ath10k_reg_notifier); if (ret) { - ath10k_err("failed to initialise regulatory: %i\n", ret); + ath10k_err(ar, "failed to initialise regulatory: %i\n", ret); goto err_free; } ret = ieee80211_register_hw(ar->hw); if (ret) { - ath10k_err("failed to register ieee80211: %d\n", ret); + ath10k_err(ar, "failed to register ieee80211: %d\n", ret); goto err_free; } diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index ef4f84376d7c..6c80eeada3e2 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -26,12 +26,14 @@ struct ath10k_generic_iter { int ret; }; -struct ath10k *ath10k_mac_create(void); +struct ath10k *ath10k_mac_create(size_t priv_size); void ath10k_mac_destroy(struct ath10k *ar); int ath10k_mac_register(struct ath10k *ar); void ath10k_mac_unregister(struct ath10k *ar); struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id); -void ath10k_reset_scan(unsigned long ptr); +void __ath10k_scan_finish(struct ath10k *ar); +void ath10k_scan_finish(struct ath10k *ar); +void ath10k_scan_timeout_work(struct work_struct *work); void ath10k_offchan_tx_purge(struct ath10k *ar); void ath10k_offchan_tx_work(struct work_struct *work); void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar); diff --git
a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 3376963a4862..056a35a77133 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -44,13 +44,9 @@ enum ath10k_pci_reset_mode { ATH10K_PCI_RESET_WARM_ONLY = 1, }; -static unsigned int ath10k_pci_target_ps; static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO; static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO; -module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644); -MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option"); - module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644); MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)"); @@ -71,10 +67,7 @@ static const struct pci_device_id ath10k_pci_id_table[] = { static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, u32 *data); -static int ath10k_pci_post_rx(struct ath10k *ar); -static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, - int num); -static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info); +static void ath10k_pci_buffer_cleanup(struct ath10k *ar); static int ath10k_pci_cold_reset(struct ath10k *ar); static int ath10k_pci_warm_reset(struct ath10k *ar); static int ath10k_pci_wait_for_target_init(struct ath10k *ar); @@ -156,79 +149,175 @@ static const struct ce_attr host_ce_config_wlan[] = { static const struct ce_pipe_config target_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ { - .pipenum = 0, - .pipedir = PIPEDIR_OUT, - .nentries = 32, - .nbytes_max = 256, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(0), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE1: target->host HTT + HTC control */ { - .pipenum = 1, - .pipedir = PIPEDIR_IN, - .nentries = 32, - .nbytes_max = 512, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(1), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(512), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE2: target->host WMI */ { - .pipenum = 2, - .pipedir = PIPEDIR_IN, - .nentries = 32, - .nbytes_max = 2048, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(2), + .pipedir = __cpu_to_le32(PIPEDIR_IN), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE3: host->target WMI */ { - .pipenum = 3, - .pipedir = PIPEDIR_OUT, - .nentries = 32, - .nbytes_max = 2048, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(3), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE4: host->target HTT */ { - .pipenum = 4, - .pipedir = PIPEDIR_OUT, - .nentries = 256, - .nbytes_max = 256, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(4), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(256), + .nbytes_max = __cpu_to_le32(256), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* NB: 50% of src nentries, since tx has 2 frags */ /* CE5: unused */ { - .pipenum = 5, - .pipedir = PIPEDIR_OUT, - .nentries = 32, - .nbytes_max = 2048, - .flags = CE_ATTR_FLAGS, - 
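/* Why the table gained __cpu_to_le32(): target_ce_config_wlan is copied
 * into target memory verbatim (see ath10k_pci_init_config further
 * below), so its fields must have a fixed byte order on big- and
 * little-endian hosts alike. Userspace analogue, compilable as-is:
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct pipe_cfg {
	uint32_t pipenum;	/* stored little-endian, like __le32 */
	uint32_t nbytes_max;
};

int main(void)
{
	struct pipe_cfg cfg = {
		.pipenum    = htole32(4),
		.nbytes_max = htole32(256),
	};

	/* always convert back to host order before CPU-side use */
	printf("pipe %u, max %u bytes\n",
	       le32toh(cfg.pipenum), le32toh(cfg.nbytes_max));
	return 0;
}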
.reserved = 0, + .pipenum = __cpu_to_le32(5), + .pipedir = __cpu_to_le32(PIPEDIR_OUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(2048), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE6: Reserved for target autonomous hif_memcpy */ { - .pipenum = 6, - .pipedir = PIPEDIR_INOUT, - .nentries = 32, - .nbytes_max = 4096, - .flags = CE_ATTR_FLAGS, - .reserved = 0, + .pipenum = __cpu_to_le32(6), + .pipedir = __cpu_to_le32(PIPEDIR_INOUT), + .nentries = __cpu_to_le32(32), + .nbytes_max = __cpu_to_le32(4096), + .flags = __cpu_to_le32(CE_ATTR_FLAGS), + .reserved = __cpu_to_le32(0), }, /* CE7 used only by Host */ }; +/* + * Map from service/endpoint to Copy Engine. + * This table is derived from the CE_PCI TABLE, above. + * It is passed to the Target at startup for use by firmware. + */ +static const struct service_to_pipe target_service_to_ce_map_wlan[] = { + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + __cpu_to_le32(0), + }, +}; + static bool ath10k_pci_irq_pending(struct ath10k *ar) { u32 cause; @@ -270,44 +359,111 @@ static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) PCIE_INTR_ENABLE_ADDRESS); } -static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg) +static inline const 
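/* Standalone sketch of a sentinel-terminated service lookup. The
 * all-zero "must be last" entry above terminates the table for the
 * firmware; the host-side lookup in this patch iterates with
 * ARRAY_SIZE() instead. Ids below are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct svc_map { uint32_t service_id, pipedir, pipenum; };

static const struct svc_map map[] = {
	{ 0x100, 1, 3 },	/* PIPEDIR_OUT-style entry */
	{ 0x100, 2, 2 },	/* PIPEDIR_IN-style entry */
	{ 0, 0, 0 },		/* must be last */
};

static int lookup(uint32_t svc, uint32_t dir, uint32_t *pipe)
{
	const struct svc_map *e;

	for (e = map; e->service_id || e->pipedir || e->pipenum; e++) {
		if (e->service_id == svc && e->pipedir == dir) {
			*pipe = e->pipenum;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	uint32_t pipe;

	if (!lookup(0x100, 2, &pipe))
		printf("service 0x100 downlink -> pipe %u\n", pipe);
	return 0;
}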
char *ath10k_pci_get_irq_method(struct ath10k *ar) { - struct ath10k *ar = arg; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - if (ar_pci->num_msi_intrs == 0) { - if (!ath10k_pci_irq_pending(ar)) - return IRQ_NONE; - - ath10k_pci_disable_and_clear_legacy_irq(ar); - } - - tasklet_schedule(&ar_pci->early_irq_tasklet); - - return IRQ_HANDLED; + if (ar_pci->num_msi_intrs > 1) + return "msi-x"; + else if (ar_pci->num_msi_intrs == 1) + return "msi"; + else + return "legacy"; } -static int ath10k_pci_request_early_irq(struct ath10k *ar) +static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) { + struct ath10k *ar = pipe->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + struct sk_buff *skb; + dma_addr_t paddr; int ret; - /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first - * interrupt from irq vector is triggered in all cases for FW - * indication/errors */ - ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler, - IRQF_SHARED, "ath10k_pci (early)", ar); + lockdep_assert_held(&ar_pci->ce_lock); + + skb = dev_alloc_skb(pipe->buf_sz); + if (!skb) + return -ENOMEM; + + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); + + paddr = dma_map_single(ar->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ar->dev, paddr))) { + ath10k_warn(ar, "failed to dma map pci rx buf\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + + ATH10K_SKB_CB(skb)->paddr = paddr; + + ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); if (ret) { - ath10k_warn("failed to request early irq: %d\n", ret); + ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); + dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); return ret; } return 0; } -static void ath10k_pci_free_early_irq(struct ath10k *ar) +static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + int ret, num; + + lockdep_assert_held(&ar_pci->ce_lock); + + if (pipe->buf_sz == 0) + return; + + if (!ce_pipe->dest_ring) + return; + + num = __ath10k_ce_rx_num_free_bufs(ce_pipe); + while (num--) { + ret = __ath10k_pci_rx_post_buf(pipe); + if (ret) { + ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); + mod_timer(&ar_pci->rx_post_retry, jiffies + + ATH10K_PCI_RX_POST_RETRY_MS); + break; + } + } +} + +static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + spin_lock_bh(&ar_pci->ce_lock); + __ath10k_pci_rx_post_pipe(pipe); + spin_unlock_bh(&ar_pci->ce_lock); +} + +static void ath10k_pci_rx_post(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int i; + + spin_lock_bh(&ar_pci->ce_lock); + for (i = 0; i < CE_COUNT; i++) + __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); + spin_unlock_bh(&ar_pci->ce_lock); +} + +static void ath10k_pci_rx_replenish_retry(unsigned long ptr) { - free_irq(ath10k_pci_priv(ar)->pdev->irq, ar); + struct ath10k *ar = (void *)ptr; + + ath10k_pci_rx_post(ar); } /* @@ -376,7 +532,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); - ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data); + ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); 
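/* The replenish path above prefers deferral to hard failure: if a
 * buffer cannot be posted, a timer retries later. Minimal sketch of
 * that shape; names and the retry interval are illustrative only.
 */
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

#define EXAMPLE_RX_RETRY_MS 50

struct example_rx {
	struct timer_list retry;
};

static int example_post_one(struct example_rx *rx)
{
	/* alloc skb, dma_map_single(), enqueue on the CE ring ... */
	return -ENOMEM;			/* pretend allocation failed */
}

static void example_rx_post(struct example_rx *rx)
{
	if (example_post_one(rx))
		mod_timer(&rx->retry,
			  jiffies + msecs_to_jiffies(EXAMPLE_RX_RETRY_MS));
}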
if (ret != 0) goto done; @@ -389,10 +545,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, * convert it from Target CPU virtual address space * to CE address space */ - ath10k_pci_wake(ar); address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); - ath10k_pci_sleep(ar); ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0); @@ -448,15 +602,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } done: - if (ret == 0) { - /* Copy data from allocated DMA buf to caller's buf */ - WARN_ON_ONCE(orig_nbytes & 3); - for (i = 0; i < orig_nbytes / sizeof(__le32); i++) { - ((u32 *)data)[i] = - __le32_to_cpu(((__le32 *)data_buf)[i]); - } - } else - ath10k_warn("failed to read diag value at 0x%x: %d\n", + if (ret == 0) + memcpy(data, data_buf, orig_nbytes); + else + ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", address, ret); if (data_buf) @@ -466,17 +615,54 @@ done: return ret; } +static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) +{ + __le32 val = 0; + int ret; + + ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); + *value = __le32_to_cpu(val); + + return ret; +} + +static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, + u32 src, u32 len) +{ + u32 host_addr, addr; + int ret; + + host_addr = host_interest_item_address(src); + + ret = ath10k_pci_diag_read32(ar, host_addr, &addr); + if (ret != 0) { + ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", + src, ret); + return ret; + } + + ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); + if (ret != 0) { + ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", + addr, len, ret); + return ret; + } + + return 0; +} + +#define ath10k_pci_diag_read_hi(ar, dest, src, len) \ + __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len); + /* Read 4-byte aligned data from Target memory or register */ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address, u32 *data) { /* Assume range doesn't cross this boundary */ if (address >= DRAM_BASE_ADDRESS) - return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32)); + return ath10k_pci_diag_read32(ar, address, data); - ath10k_pci_wake(ar); *data = ath10k_pci_read32(ar, address); - ath10k_pci_sleep(ar); return 0; } @@ -514,9 +700,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, } /* Copy caller's data to allocated DMA buf */ - WARN_ON_ONCE(orig_nbytes & 3); - for (i = 0; i < orig_nbytes / sizeof(__le32); i++) - ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]); + memcpy(data_buf, data, orig_nbytes); /* * The address supplied by the caller is in the @@ -528,9 +712,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * to * CE address space */ - ath10k_pci_wake(ar); address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address); - ath10k_pci_sleep(ar); remaining_bytes = orig_nbytes; ce_data = ce_data_base; @@ -539,7 +721,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); /* Set up to receive directly into Target(!) 
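/* Hypothetical caller (not in the patch) of the new 32-bit diag
 * helpers introduced above: a read-modify-write of one target word,
 * the same pattern ath10k_pci_init_config() uses further down for
 * config_flags.
 */
static int example_diag_set_bit(struct ath10k *ar, u32 address, u32 bit)
{
	u32 val;
	int ret;

	ret = ath10k_pci_diag_read32(ar, address, &val);
	if (ret)
		return ret;

	return ath10k_pci_diag_write32(ar, address, val | bit);
}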
address */ - ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address); + ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address); if (ret != 0) goto done; @@ -608,66 +790,46 @@ done: } if (ret != 0) - ath10k_warn("failed to write diag value at 0x%x: %d\n", + ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); return ret; } +static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) +{ + __le32 val = __cpu_to_le32(value); + + return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); +} + /* Write 4B data to Target memory or register */ static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address, u32 data) { /* Assume range doesn't cross this boundary */ if (address >= DRAM_BASE_ADDRESS) - return ath10k_pci_diag_write_mem(ar, address, &data, - sizeof(u32)); + return ath10k_pci_diag_write32(ar, address, data); - ath10k_pci_wake(ar); ath10k_pci_write32(ar, address, data); - ath10k_pci_sleep(ar); return 0; } -static bool ath10k_pci_target_is_awake(struct ath10k *ar) +static bool ath10k_pci_is_awake(struct ath10k *ar) { - void __iomem *mem = ath10k_pci_priv(ar)->mem; - u32 val; - val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + - RTC_STATE_ADDRESS); - return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON); + u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS); + + return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; } -int ath10k_do_pci_wake(struct ath10k *ar) +static int ath10k_pci_wake_wait(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *pci_addr = ar_pci->mem; int tot_delay = 0; int curr_delay = 5; - if (atomic_read(&ar_pci->keep_awake_count) == 0) { - /* Force AWAKE */ - iowrite32(PCIE_SOC_WAKE_V_MASK, - pci_addr + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); - } - atomic_inc(&ar_pci->keep_awake_count); - - if (ar_pci->verified_awake) - return 0; - - for (;;) { - if (ath10k_pci_target_is_awake(ar)) { - ar_pci->verified_awake = true; + while (tot_delay < PCIE_WAKE_TIMEOUT) { + if (ath10k_pci_is_awake(ar)) return 0; - } - - if (tot_delay > PCIE_WAKE_TIMEOUT) { - ath10k_warn("target took longer %d us to wake up (awake count %d)\n", - PCIE_WAKE_TIMEOUT, - atomic_read(&ar_pci->keep_awake_count)); - return -ETIMEDOUT; - } udelay(curr_delay); tot_delay += curr_delay; @@ -675,20 +837,21 @@ int ath10k_do_pci_wake(struct ath10k *ar) if (curr_delay < 50) curr_delay += 5; } + + return -ETIMEDOUT; } -void ath10k_do_pci_sleep(struct ath10k *ar) +static int ath10k_pci_wake(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - void __iomem *pci_addr = ar_pci->mem; + ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_V_MASK); + return ath10k_pci_wake_wait(ar); +} - if (atomic_dec_and_test(&ar_pci->keep_awake_count)) { - /* Allow sleep */ - ar_pci->verified_awake = false; - iowrite32(PCIE_SOC_WAKE_RESET, - pci_addr + PCIE_LOCAL_BASE_ADDRESS + - PCIE_SOC_WAKE_ADDRESS); - } +static void ath10k_pci_sleep(struct ath10k *ar) +{ + ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, + PCIE_SOC_WAKE_RESET); } /* Called by lower (CE) layer when a send to Target completes. 
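/* Compile-and-run rendering of ath10k_pci_wake_wait()'s loop: the poll
 * interval starts at 5 us and grows by 5 us per attempt, capped at
 * 50 us, until the total budget is exhausted. The budget value and the
 * "device awake" predicate below are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_WAKE_TIMEOUT_US 5000

static bool example_is_awake(int polls) { return polls > 7; }

int main(void)
{
	int tot = 0, cur = 5, polls = 0;

	while (tot < EXAMPLE_WAKE_TIMEOUT_US) {
		if (example_is_awake(++polls)) {
			printf("awake after %d us, %d polls\n", tot, polls);
			return 0;
		}
		/* udelay(cur) in the driver */
		tot += cur;
		if (cur < 50)
			cur += 5;
	}
	printf("timed out\n");
	return 1;
}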
*/ @@ -726,19 +889,17 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) unsigned int nbytes, max_nbytes; unsigned int transfer_id; unsigned int flags; - int err, num_replenish = 0; while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, &ce_data, &nbytes, &transfer_id, &flags) == 0) { - num_replenish++; skb = transfer_context; max_nbytes = skb->len + skb_tailroom(skb); dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); if (unlikely(max_nbytes < nbytes)) { - ath10k_warn("rxed more than expected (nbytes %d, max %d)", + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", nbytes, max_nbytes); dev_kfree_skb_any(skb); continue; @@ -748,12 +909,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) cb->rx_completion(ar, skb, pipe_info->pipe_num); } - err = ath10k_pci_post_rx_pipe(pipe_info, num_replenish); - if (unlikely(err)) { - /* FIXME: retry */ - ath10k_warn("failed to replenish CE rx ring %d (%d bufs): %d\n", - pipe_info->pipe_num, num_replenish, err); - } + ath10k_pci_rx_post_pipe(pipe_info); } static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, @@ -781,10 +937,10 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, } for (i = 0; i < n_items - 1; i++) { - ath10k_dbg(ATH10K_DBG_PCI, + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci tx item %d paddr 0x%08x len %d n_items %d\n", i, items[i].paddr, items[i].len, n_items); - ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ", + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); err = ath10k_ce_send_nolock(ce_pipe, @@ -799,10 +955,10 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, /* `i` is equal to `n_items -1` after for() */ - ath10k_dbg(ATH10K_DBG_PCI, + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci tx item %d paddr 0x%08x len %d n_items %d\n", i, items[i].paddr, items[i].len, n_items); - ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ", + ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); err = ath10k_ce_send_nolock(ce_pipe, @@ -829,52 +985,64 @@ static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); } -static void ath10k_pci_hif_dump_area(struct ath10k *ar) +static void ath10k_pci_dump_registers(struct ath10k *ar, + struct ath10k_fw_crash_data *crash_data) { - u32 reg_dump_area = 0; - u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; - u32 host_addr; - int ret; - u32 i; + __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; + int i, ret; - ath10k_err("firmware crashed!\n"); - ath10k_err("hardware name %s version 0x%x\n", - ar->hw_params.name, ar->target_version); - ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version); + lockdep_assert_held(&ar->data_lock); - host_addr = host_interest_item_address(HI_ITEM(hi_failure_state)); - ret = ath10k_pci_diag_read_mem(ar, host_addr, - &reg_dump_area, sizeof(u32)); + ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0], + hi_failure_state, + REG_DUMP_COUNT_QCA988X * sizeof(__le32)); if (ret) { - ath10k_err("failed to read FW dump area address: %d\n", ret); - return; - } - - ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area); - - ret = ath10k_pci_diag_read_mem(ar, reg_dump_area, - 
&reg_dump_values[0], - REG_DUMP_COUNT_QCA988X * sizeof(u32)); - if (ret != 0) { - ath10k_err("failed to read FW dump area: %d\n", ret); + ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); return; } BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); - ath10k_err("target Register Dump\n"); + ath10k_err(ar, "firmware register dump:\n"); for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) - ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", + ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, - reg_dump_values[i], - reg_dump_values[i + 1], - reg_dump_values[i + 2], - reg_dump_values[i + 3]); + __le32_to_cpu(reg_dump_values[i]), + __le32_to_cpu(reg_dump_values[i + 1]), + __le32_to_cpu(reg_dump_values[i + 2]), + __le32_to_cpu(reg_dump_values[i + 3])); + + if (!crash_data) + return; + + for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++) + crash_data->registers[i] = reg_dump_values[i]; +} + +static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) +{ + struct ath10k_fw_crash_data *crash_data; + char uuid[50]; + + spin_lock_bh(&ar->data_lock); + + crash_data = ath10k_debug_get_new_fw_crash_data(ar); + + if (crash_data) + scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid); + else + scnprintf(uuid, sizeof(uuid), "n/a"); + + ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid); + ath10k_print_driver_info(ar); + ath10k_pci_dump_registers(ar, crash_data); + + spin_unlock_bh(&ar->data_lock); queue_work(ar->workqueue, &ar->restart_work); } @@ -882,7 +1050,7 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar) static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, int force) { - ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); if (!force) { int resources; @@ -910,43 +1078,12 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar, { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n"); memcpy(&ar_pci->msg_callbacks_current, callbacks, sizeof(ar_pci->msg_callbacks_current)); } -static int ath10k_pci_setup_ce_irq(struct ath10k *ar) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - const struct ce_attr *attr; - struct ath10k_pci_pipe *pipe_info; - int pipe_num, disable_interrupts; - - for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - - /* Handle Diagnostic CE specially */ - if (pipe_info->ce_hdl == ar_pci->ce_diag) - continue; - - attr = &host_ce_config_wlan[pipe_num]; - - if (attr->src_nentries) { - disable_interrupts = attr->flags & CE_ATTR_DIS_INTR; - ath10k_ce_send_cb_register(pipe_info->ce_hdl, - ath10k_pci_ce_send_done, - disable_interrupts); - } - - if (attr->dest_nentries) - ath10k_ce_recv_cb_register(pipe_info->ce_hdl, - ath10k_pci_ce_recv_data); - } - - return 0; -} - static void ath10k_pci_kill_tasklet(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -954,74 +1091,64 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar) tasklet_kill(&ar_pci->intr_tq); tasklet_kill(&ar_pci->msi_fw_err); - tasklet_kill(&ar_pci->early_irq_tasklet); for (i = 0; i < CE_COUNT; i++) tasklet_kill(&ar_pci->pipe_info[i].intr); + + del_timer_sync(&ar_pci->rx_post_retry); } -/* TODO - temporary mapping while we have too few CE's */ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, u8 *ul_pipe, u8 *dl_pipe, int *ul_is_polled, int *dl_is_polled) { - int ret = 0; + const struct 
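/* The crash path above labels each register dump with a uuid via the
 * kernel's "%pU" printk extension ('l' selects little-endian byte
 * order). Hypothetical, minimal use of the same specifier; the uuid_le
 * type name matches kernels of this vintage:
 */
#include <linux/printk.h>
#include <linux/uuid.h>

static void example_report_crash(const uuid_le *id)
{
	pr_err("firmware crashed! (uuid %pUl)\n", id);
}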
service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; - ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); /* polling for received messages not supported */ *dl_is_polled = 0; - switch (service_id) { - case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: - /* - * Host->target HTT gets its own pipe, so it can be polled - * while other pipes are interrupt driven. - */ - *ul_pipe = 4; - /* - * Use the same target->host pipe for HTC ctrl, HTC raw - * streams, and HTT. - */ - *dl_pipe = 1; - break; + for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { + entry = &target_service_to_ce_map_wlan[i]; - case ATH10K_HTC_SVC_ID_RSVD_CTRL: - case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: - /* - * Note: HTC_RAW_STREAMS_SVC is currently unused, and - * HTC_CTRL_RSVD_SVC could share the same pipe as the - * WMI services. So, if another CE is needed, change - * this to *ul_pipe = 3, which frees up CE 0. - */ - /* *ul_pipe = 3; */ - *ul_pipe = 0; - *dl_pipe = 1; - break; - - case ATH10K_HTC_SVC_ID_WMI_DATA_BK: - case ATH10K_HTC_SVC_ID_WMI_DATA_BE: - case ATH10K_HTC_SVC_ID_WMI_DATA_VI: - case ATH10K_HTC_SVC_ID_WMI_DATA_VO: + if (__le32_to_cpu(entry->service_id) != service_id) + continue; - case ATH10K_HTC_SVC_ID_WMI_CONTROL: - *ul_pipe = 3; - *dl_pipe = 2; - break; + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } - /* pipe 5 unused */ - /* pipe 6 reserved */ - /* pipe 7 reserved */ + if (WARN_ON(!ul_set || !dl_set)) + return -ENOENT; - default: - ret = -1; - break; - } *ul_is_polled = (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0; - return ret; + return 0; } static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, @@ -1029,7 +1156,7 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, { int ul_is_polled, dl_is_polled; - ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); (void)ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_RSVD_CTRL, @@ -1039,141 +1166,48 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, &dl_is_polled); } -static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info, - int num) +static void ath10k_pci_irq_disable(struct ath10k *ar) { - struct ath10k *ar = pipe_info->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl; - struct sk_buff *skb; - dma_addr_t ce_data; - int i, ret = 0; - - if (pipe_info->buf_sz == 0) - return 0; - - for (i = 0; i < num; i++) { - skb = dev_alloc_skb(pipe_info->buf_sz); - if (!skb) { - ath10k_warn("failed to allocate skbuff for pipe %d\n", - num); - ret = -ENOMEM; - goto err; - } - - WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); - - ce_data = dma_map_single(ar->dev, skb->data, - skb->len + skb_tailroom(skb), - DMA_FROM_DEVICE); - - if (unlikely(dma_mapping_error(ar->dev, ce_data))) { - ath10k_warn("failed to DMA map sk_buff\n"); - dev_kfree_skb_any(skb); - ret = -EIO; - goto err; - } - - ATH10K_SKB_CB(skb)->paddr = ce_data; - - 
pci_dma_sync_single_for_device(ar_pci->pdev, ce_data, - pipe_info->buf_sz, - PCI_DMA_FROMDEVICE); + int i; - ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb, - ce_data); - if (ret) { - ath10k_warn("failed to enqueue to pipe %d: %d\n", - num, ret); - goto err; - } - } + ath10k_ce_disable_interrupts(ar); - return ret; + /* Regardless how many interrupts were assigned for MSI the first one + * is always used for firmware indications (crashes). There's no way to + * mask the irq in the device so call disable_irq(). Legacy (shared) + * interrupts can be masked on the device though. + */ + if (ar_pci->num_msi_intrs > 0) + disable_irq(ar_pci->pdev->irq); + else + ath10k_pci_disable_and_clear_legacy_irq(ar); -err: - ath10k_pci_rx_pipe_cleanup(pipe_info); - return ret; + for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) + synchronize_irq(ar_pci->pdev->irq + i); } -static int ath10k_pci_post_rx(struct ath10k *ar) +static void ath10k_pci_irq_enable(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_pci_pipe *pipe_info; - const struct ce_attr *attr; - int pipe_num, ret = 0; - for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - attr = &host_ce_config_wlan[pipe_num]; - - if (attr->dest_nentries == 0) - continue; - - ret = ath10k_pci_post_rx_pipe(pipe_info, - attr->dest_nentries - 1); - if (ret) { - ath10k_warn("failed to post RX buffer for pipe %d: %d\n", - pipe_num, ret); - - for (; pipe_num >= 0; pipe_num--) { - pipe_info = &ar_pci->pipe_info[pipe_num]; - ath10k_pci_rx_pipe_cleanup(pipe_info); - } - return ret; - } - } + ath10k_ce_enable_interrupts(ar); - return 0; + /* See comment in ath10k_pci_irq_disable() */ + if (ar_pci->num_msi_intrs > 0) + enable_irq(ar_pci->pdev->irq); + else + ath10k_pci_enable_legacy_irq(ar); } static int ath10k_pci_hif_start(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret, ret_early; - - ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n"); - - ath10k_pci_free_early_irq(ar); - ath10k_pci_kill_tasklet(ar); - - ret = ath10k_pci_request_irq(ar); - if (ret) { - ath10k_warn("failed to post RX buffers for all pipes: %d\n", - ret); - goto err_early_irq; - } - - ret = ath10k_pci_setup_ce_irq(ar); - if (ret) { - ath10k_warn("failed to setup CE interrupts: %d\n", ret); - goto err_stop; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); - /* Post buffers once to start things off. */ - ret = ath10k_pci_post_rx(ar); - if (ret) { - ath10k_warn("failed to post RX buffers for all pipes: %d\n", - ret); - goto err_stop; - } + ath10k_pci_irq_enable(ar); + ath10k_pci_rx_post(ar); - ar_pci->started = 1; return 0; - -err_stop: - ath10k_ce_disable_interrupts(ar); - ath10k_pci_free_irq(ar); - ath10k_pci_kill_tasklet(ar); -err_early_irq: - /* Though there should be no interrupts (device was reset) - * power_down() expects the early IRQ to be installed as per the - * driver lifecycle. 
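/* The new disable path above quiesces in two steps: mask the source
 * (or the line, for the unmaskable MSI firmware vector), then wait out
 * any handler already running. Generic shape, names illustrative:
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static void example_quiesce(struct pci_dev *pdev, int num_msi)
{
	int i;

	if (num_msi > 0)
		disable_irq(pdev->irq);	/* fw-indication vector */
	/* legacy irqs are masked in device CSRs instead (not shown) */

	for (i = 0; i < (num_msi ? num_msi : 1); i++)
		synchronize_irq(pdev->irq + i);
}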
*/ - ret_early = ath10k_pci_request_early_irq(ar); - if (ret_early) - ath10k_warn("failed to re-enable early irq: %d\n", ret_early); - - return ret; } static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) @@ -1193,10 +1227,6 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) ar = pipe_info->hif_ce_state; ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci->started) - return; - ce_hdl = pipe_info->ce_hdl; while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf, @@ -1227,10 +1257,6 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info) ar = pipe_info->hif_ce_state; ar_pci = ath10k_pci_priv(ar); - - if (!ar_pci->started) - return; - ce_hdl = pipe_info->ce_hdl; while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf, @@ -1275,41 +1301,24 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar) ath10k_ce_deinit_pipe(ar, i); } -static void ath10k_pci_hif_stop(struct ath10k *ar) +static void ath10k_pci_flush(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - int ret; - - ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n"); - - if (WARN_ON(!ar_pci->started)) - return; - - ret = ath10k_ce_disable_interrupts(ar); - if (ret) - ath10k_warn("failed to disable CE interrupts: %d\n", ret); - - ath10k_pci_free_irq(ar); ath10k_pci_kill_tasklet(ar); + ath10k_pci_buffer_cleanup(ar); +} - ret = ath10k_pci_request_early_irq(ar); - if (ret) - ath10k_warn("failed to re-enable early irq: %d\n", ret); - - /* At this point, asynchronous threads are stopped, the target should - * not DMA nor interrupt. We process the leftovers and then free - * everything else up. */ +static void ath10k_pci_hif_stop(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); - ath10k_pci_buffer_cleanup(ar); + ath10k_pci_irq_disable(ar); + ath10k_pci_flush(ar); - /* Make the sure the device won't access any structures on the host by - * resetting it. The device was fed with PCI CE ringbuffer - * configuration during init. If ringbuffers are freed and the device - * were to access them this could lead to memory corruption on the - * host. */ + /* Most likely the device has HTT Rx ring configured. The only way to + * prevent the device from accessing (and possibly corrupting) host + * memory is to reset the chip now. + */ ath10k_pci_warm_reset(ar); - - ar_pci->started = 0; } static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, @@ -1360,7 +1369,7 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, xfer.wait_for_resp = true; xfer.resp_len = 0; - ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr); + ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr); } ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); @@ -1418,6 +1427,7 @@ static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) { + struct ath10k *ar = ce_state->ar; struct bmi_xfer *xfer; u32 ce_data; unsigned int nbytes; @@ -1429,7 +1439,7 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) return; if (!xfer->wait_for_resp) { - ath10k_warn("unexpected: BMI data received; ignoring\n"); + ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); return; } @@ -1457,102 +1467,6 @@ static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, } /* 
- */ -static const struct service_to_pipe target_service_to_ce_map_wlan[] = { - { - ATH10K_HTC_SVC_ID_WMI_DATA_VO, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_VO, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BK, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BK, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BE, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_BE, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_VI, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_DATA_VI, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_WMI_CONTROL, - PIPEDIR_OUT, /* out = UL = host -> target */ - 3, - }, - { - ATH10K_HTC_SVC_ID_WMI_CONTROL, - PIPEDIR_IN, /* in = DL = target -> host */ - 2, - }, - { - ATH10K_HTC_SVC_ID_RSVD_CTRL, - PIPEDIR_OUT, /* out = UL = host -> target */ - 0, /* could be moved to 3 (share with WMI) */ - }, - { - ATH10K_HTC_SVC_ID_RSVD_CTRL, - PIPEDIR_IN, /* in = DL = target -> host */ - 1, - }, - { - ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ - PIPEDIR_OUT, /* out = UL = host -> target */ - 0, - }, - { - ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */ - PIPEDIR_IN, /* in = DL = target -> host */ - 1, - }, - { - ATH10K_HTC_SVC_ID_HTT_DATA_MSG, - PIPEDIR_OUT, /* out = UL = host -> target */ - 4, - }, - { - ATH10K_HTC_SVC_ID_HTT_DATA_MSG, - PIPEDIR_IN, /* in = DL = target -> host */ - 1, - }, - - /* (Additions here) */ - - { /* Must be last */ - 0, - 0, - 0, - }, -}; - -/* * Send an interrupt to the device to wake up the Target CPU * so it has an opportunity to notice any changed state. 
*/ @@ -1565,7 +1479,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) CORE_CTRL_ADDRESS, &core_ctrl); if (ret) { - ath10k_warn("failed to read core_ctrl: %d\n", ret); + ath10k_warn(ar, "failed to read core_ctrl: %d\n", ret); return ret; } @@ -1576,7 +1490,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) CORE_CTRL_ADDRESS, core_ctrl); if (ret) { - ath10k_warn("failed to set target CPU interrupt mask: %d\n", + ath10k_warn(ar, "failed to set target CPU interrupt mask: %d\n", ret); return ret; } @@ -1605,13 +1519,13 @@ static int ath10k_pci_init_config(struct ath10k *ar) ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr, &pcie_state_targ_addr); if (ret != 0) { - ath10k_err("Failed to get pcie state addr: %d\n", ret); + ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); return ret; } if (pcie_state_targ_addr == 0) { ret = -EIO; - ath10k_err("Invalid pcie state addr\n"); + ath10k_err(ar, "Invalid pcie state addr\n"); return ret; } @@ -1620,13 +1534,13 @@ static int ath10k_pci_init_config(struct ath10k *ar) pipe_cfg_addr), &pipe_cfg_targ_addr); if (ret != 0) { - ath10k_err("Failed to get pipe cfg addr: %d\n", ret); + ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); return ret; } if (pipe_cfg_targ_addr == 0) { ret = -EIO; - ath10k_err("Invalid pipe cfg addr\n"); + ath10k_err(ar, "Invalid pipe cfg addr\n"); return ret; } @@ -1635,7 +1549,7 @@ static int ath10k_pci_init_config(struct ath10k *ar) sizeof(target_ce_config_wlan)); if (ret != 0) { - ath10k_err("Failed to write pipe cfg: %d\n", ret); + ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); return ret; } @@ -1644,13 +1558,13 @@ static int ath10k_pci_init_config(struct ath10k *ar) svc_to_pipe_map), &svc_to_pipe_map); if (ret != 0) { - ath10k_err("Failed to get svc/pipe map: %d\n", ret); + ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); return ret; } if (svc_to_pipe_map == 0) { ret = -EIO; - ath10k_err("Invalid svc_to_pipe map\n"); + ath10k_err(ar, "Invalid svc_to_pipe map\n"); return ret; } @@ -1658,7 +1572,7 @@ static int ath10k_pci_init_config(struct ath10k *ar) target_service_to_ce_map_wlan, sizeof(target_service_to_ce_map_wlan)); if (ret != 0) { - ath10k_err("Failed to write svc/pipe map: %d\n", ret); + ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); return ret; } @@ -1667,18 +1581,17 @@ static int ath10k_pci_init_config(struct ath10k *ar) config_flags), &pcie_config_flags); if (ret != 0) { - ath10k_err("Failed to get pcie config_flags: %d\n", ret); + ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret); return ret; } pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; - ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr + + ret = ath10k_pci_diag_write_access(ar, pcie_state_targ_addr + offsetof(struct pcie_state, config_flags), - &pcie_config_flags, - sizeof(pcie_config_flags)); + pcie_config_flags); if (ret != 0) { - ath10k_err("Failed to write pcie config_flags: %d\n", ret); + ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret); return ret; } @@ -1687,7 +1600,7 @@ static int ath10k_pci_init_config(struct ath10k *ar) ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value); if (ret != 0) { - ath10k_err("Faile to get early alloc val: %d\n", ret); + ath10k_err(ar, "Failed to get early alloc val: %d\n", ret); return ret; } @@ -1699,7 +1612,7 @@ static int ath10k_pci_init_config(struct ath10k *ar) ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value); if (ret != 0) { - ath10k_err("Failed to set early alloc val: 
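/* The init-config sequence above walks a structure that lives in
 * target memory by reusing the host-side struct layout with
 * offsetof(). A condensed, hypothetical version of one such access,
 * built on the diag helper introduced earlier in this patch:
 */
static int example_read_pipe_cfg_addr(struct ath10k *ar,
				      u32 pcie_state_targ_addr, u32 *out)
{
	return ath10k_pci_diag_read32(ar, pcie_state_targ_addr +
				      offsetof(struct pcie_state,
					       pipe_cfg_addr),
				      out);
}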
%d\n", ret); + ath10k_err(ar, "Failed to set early alloc val: %d\n", ret); return ret; } @@ -1708,7 +1621,7 @@ static int ath10k_pci_init_config(struct ath10k *ar) ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value); if (ret != 0) { - ath10k_err("Failed to get option val: %d\n", ret); + ath10k_err(ar, "Failed to get option val: %d\n", ret); return ret; } @@ -1716,7 +1629,7 @@ static int ath10k_pci_init_config(struct ath10k *ar) ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value); if (ret != 0) { - ath10k_err("Failed to set option val: %d\n", ret); + ath10k_err(ar, "Failed to set option val: %d\n", ret); return ret; } @@ -1730,7 +1643,7 @@ static int ath10k_pci_alloc_ce(struct ath10k *ar) for (i = 0; i < CE_COUNT; i++) { ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); if (ret) { - ath10k_err("failed to allocate copy engine pipe %d: %d\n", + ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", i, ret); return ret; } @@ -1761,9 +1674,11 @@ static int ath10k_pci_ce_init(struct ath10k *ar) pipe_info->hif_ce_state = ar; attr = &host_ce_config_wlan[pipe_num]; - ret = ath10k_ce_init_pipe(ar, pipe_num, attr); + ret = ath10k_ce_init_pipe(ar, pipe_num, attr, + ath10k_pci_ce_send_done, + ath10k_pci_ce_recv_data); if (ret) { - ath10k_err("failed to initialize copy engine pipe %d: %d\n", + ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", pipe_num, ret); return ret; } @@ -1783,32 +1698,19 @@ static int ath10k_pci_ce_init(struct ath10k *ar) return 0; } -static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar) +static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - u32 fw_indicator; - - ath10k_pci_wake(ar); - - fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); - - if (fw_indicator & FW_IND_EVENT_PENDING) { - /* ACK: clear Target-side pending event */ - ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, - fw_indicator & ~FW_IND_EVENT_PENDING); + return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & + FW_IND_EVENT_PENDING; +} - if (ar_pci->started) { - ath10k_pci_hif_dump_area(ar); - } else { - /* - * Probable Target failure before we're prepared - * to handle it. Generally unexpected. 
- */ - ath10k_warn("early firmware event indicated\n"); - } - } +static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) +{ + u32 val; - ath10k_pci_sleep(ar); + val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); + val &= ~FW_IND_EVENT_PENDING; + ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); } /* this function effectively clears target memory controller assert line */ @@ -1833,25 +1735,19 @@ static void ath10k_pci_warm_reset_si0(struct ath10k *ar) static int ath10k_pci_warm_reset(struct ath10k *ar) { - int ret = 0; u32 val; - ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n"); - - ret = ath10k_do_pci_wake(ar); - if (ret) { - ath10k_err("failed to wake up target: %d\n", ret); - return ret; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); /* debug */ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CAUSE_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", + val); val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CPU_INTR_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", val); /* disable pending irqs */ @@ -1894,11 +1790,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) /* debug */ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CAUSE_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", + val); val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CPU_INTR_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n", val); /* CPU warm reset */ @@ -1909,20 +1806,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", + val); msleep(100); - ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); - ath10k_do_pci_sleep(ar); - return ret; + return 0; } static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - const char *irq_mode; int ret; /* @@ -1941,80 +1836,39 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset) ret = ath10k_pci_warm_reset(ar); if (ret) { - ath10k_err("failed to reset target: %d\n", ret); + ath10k_err(ar, "failed to reset target: %d\n", ret); goto err; } - if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - /* Force AWAKE forever */ - ath10k_do_pci_wake(ar); - ret = ath10k_pci_ce_init(ar); if (ret) { - ath10k_err("failed to initialize CE: %d\n", ret); - goto err_ps; - } - - ret = ath10k_ce_disable_interrupts(ar); - if (ret) { - ath10k_err("failed to disable CE interrupts: %d\n", ret); - goto err_ce; - } - - ret = ath10k_pci_init_irq(ar); - if (ret) { - ath10k_err("failed to init irqs: %d\n", ret); - goto err_ce; - } - - ret = ath10k_pci_request_early_irq(ar); - if (ret) { - ath10k_err("failed to request early irq: %d\n", ret); - goto err_deinit_irq; + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto err; } ret = ath10k_pci_wait_for_target_init(ar); if (ret) { - ath10k_err("failed to wait for target to init: %d\n", ret); - 
goto err_free_early_irq; + ath10k_err(ar, "failed to wait for target to init: %d\n", ret); + goto err_ce; } ret = ath10k_pci_init_config(ar); if (ret) { - ath10k_err("failed to setup init config: %d\n", ret); - goto err_free_early_irq; + ath10k_err(ar, "failed to setup init config: %d\n", ret); + goto err_ce; } ret = ath10k_pci_wake_target_cpu(ar); if (ret) { - ath10k_err("could not wake up target CPU: %d\n", ret); - goto err_free_early_irq; + ath10k_err(ar, "could not wake up target CPU: %d\n", ret); + goto err_ce; } - if (ar_pci->num_msi_intrs > 1) - irq_mode = "MSI-X"; - else if (ar_pci->num_msi_intrs == 1) - irq_mode = "MSI"; - else - irq_mode = "legacy"; - - if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) - ath10k_info("pci irq %s irq_mode %d reset_mode %d\n", - irq_mode, ath10k_pci_irq_mode, - ath10k_pci_reset_mode); - return 0; -err_free_early_irq: - ath10k_pci_free_early_irq(ar); -err_deinit_irq: - ath10k_pci_deinit_irq(ar); err_ce: ath10k_pci_ce_deinit(ar); ath10k_pci_warm_reset(ar); -err_ps: - if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - ath10k_do_pci_sleep(ar); err: return ret; } @@ -2034,7 +1888,7 @@ static int ath10k_pci_hif_power_up_warm(struct ath10k *ar) if (ret == 0) break; - ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n", + ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n", i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret); } @@ -2045,7 +1899,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) { int ret; - ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); /* * Hardware CUS232 version 2 has some issues with cold reset and the @@ -2057,17 +1911,17 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) */ ret = ath10k_pci_hif_power_up_warm(ar); if (ret) { - ath10k_warn("failed to power up target using warm reset: %d\n", + ath10k_warn(ar, "failed to power up target using warm reset: %d\n", ret); if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) return ret; - ath10k_warn("trying cold reset\n"); + ath10k_warn(ar, "trying cold reset\n"); ret = __ath10k_pci_hif_power_up(ar, true); if (ret) { - ath10k_err("failed to power up target using cold reset too (%d)\n", + ath10k_err(ar, "failed to power up target using cold reset too (%d)\n", ret); return ret; } @@ -2078,18 +1932,9 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) static void ath10k_pci_hif_power_down(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); - ath10k_pci_free_early_irq(ar); - ath10k_pci_kill_tasklet(ar); - ath10k_pci_deinit_irq(ar); - ath10k_pci_ce_deinit(ar); ath10k_pci_warm_reset(ar); - - if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - ath10k_do_pci_sleep(ar); } #ifdef CONFIG_PM @@ -2171,7 +2016,13 @@ static void ath10k_msi_err_tasklet(unsigned long data) { struct ath10k *ar = (struct ath10k *)data; - ath10k_pci_fw_interrupt_handler(ar); + if (!ath10k_pci_has_fw_crashed(ar)) { + ath10k_warn(ar, "received unsolicited fw crash interrupt\n"); + return; + } + + ath10k_pci_fw_crashed_clear(ar); + ath10k_pci_fw_crashed_dump(ar); } /* @@ -2185,7 +2036,8 @@ static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { - ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id); + 
ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, + ce_id); return IRQ_HANDLED; } @@ -2232,36 +2084,17 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) return IRQ_HANDLED; } -static void ath10k_pci_early_irq_tasklet(unsigned long data) +static void ath10k_pci_tasklet(unsigned long data) { struct ath10k *ar = (struct ath10k *)data; - u32 fw_ind; - int ret; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_warn("failed to wake target in early irq tasklet: %d\n", - ret); + if (ath10k_pci_has_fw_crashed(ar)) { + ath10k_pci_fw_crashed_clear(ar); + ath10k_pci_fw_crashed_dump(ar); return; } - fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); - if (fw_ind & FW_IND_EVENT_PENDING) { - ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, - fw_ind & ~FW_IND_EVENT_PENDING); - ath10k_pci_hif_dump_area(ar); - } - - ath10k_pci_sleep(ar); - ath10k_pci_enable_legacy_irq(ar); -} - -static void ath10k_pci_tasklet(unsigned long data) -{ - struct ath10k *ar = (struct ath10k *)data; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */ ath10k_ce_per_engine_service_any(ar); /* Re-enable legacy irq that was disabled in the irq handler */ @@ -2278,7 +2111,7 @@ static int ath10k_pci_request_irq_msix(struct ath10k *ar) ath10k_pci_msi_fw_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { - ath10k_warn("failed to request MSI-X fw irq %d: %d\n", + ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n", ar_pci->pdev->irq + MSI_ASSIGN_FW, ret); return ret; } @@ -2288,7 +2121,7 @@ static int ath10k_pci_request_irq_msix(struct ath10k *ar) ath10k_pci_per_engine_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { - ath10k_warn("failed to request MSI-X ce irq %d: %d\n", + ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n", ar_pci->pdev->irq + i, ret); for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--) @@ -2311,7 +2144,7 @@ static int ath10k_pci_request_irq_msi(struct ath10k *ar) ath10k_pci_interrupt_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { - ath10k_warn("failed to request MSI irq %d: %d\n", + ath10k_warn(ar, "failed to request MSI irq %d: %d\n", ar_pci->pdev->irq, ret); return ret; } @@ -2328,7 +2161,7 @@ static int ath10k_pci_request_irq_legacy(struct ath10k *ar) ath10k_pci_interrupt_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { - ath10k_warn("failed to request legacy irq %d: %d\n", + ath10k_warn(ar, "failed to request legacy irq %d: %d\n", ar_pci->pdev->irq, ret); return ret; } @@ -2349,7 +2182,7 @@ static int ath10k_pci_request_irq(struct ath10k *ar) return ath10k_pci_request_irq_msix(ar); } - ath10k_warn("unknown irq configuration upon request\n"); + ath10k_warn(ar, "unknown irq configuration upon request\n"); return -EINVAL; } @@ -2372,8 +2205,6 @@ static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, (unsigned long)ar); - tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet, - (unsigned long)ar); for (i = 0; i < CE_COUNT; i++) { ar_pci->pipe_info[i].ar_pci = ar_pci; @@ -2385,18 +2216,16 @@ static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) static int ath10k_pci_init_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X, - ar_pci->features); int ret; ath10k_pci_init_irq_tasklets(ar); - if (ath10k_pci_irq_mode != 
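/* Generic shape of the MSI-X request loop above: on a partial failure,
 * walk back and free every vector acquired so far before propagating
 * the error. Illustrative fragment, names are not the driver's:
 */
#include <linux/interrupt.h>

static int example_request_vectors(int base_irq, int n,
				   irq_handler_t handler, void *ctx)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = request_irq(base_irq + i, handler, IRQF_SHARED,
				  "example", ctx);
		if (ret) {
			while (--i >= 0)
				free_irq(base_irq + i, ctx);
			return ret;
		}
	}
	return 0;
}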
ATH10K_PCI_IRQ_AUTO && - !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) - ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode); + if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) + ath10k_info(ar, "limiting irq mode to: %d\n", + ath10k_pci_irq_mode); /* Try MSI-X */ - if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) { + if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) { ar_pci->num_msi_intrs = MSI_NUM_REQUEST; ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, ar_pci->num_msi_intrs); @@ -2426,34 +2255,16 @@ static int ath10k_pci_init_irq(struct ath10k *ar) * synchronization checking. */ ar_pci->num_msi_intrs = 0; - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_warn("failed to wake target: %d\n", ret); - return ret; - } - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); - ath10k_pci_sleep(ar); return 0; } -static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar) +static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) { - int ret; - - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_warn("failed to wake target: %d\n", ret); - return ret; - } - ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 0); - ath10k_pci_sleep(ar); - - return 0; } static int ath10k_pci_deinit_irq(struct ath10k *ar) @@ -2462,7 +2273,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar) switch (ar_pci->num_msi_intrs) { case 0: - return ath10k_pci_deinit_irq_legacy(ar); + ath10k_pci_deinit_irq_legacy(ar); + return 0; case 1: /* fall-through */ case MSI_NUM_REQUEST: @@ -2472,7 +2284,7 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar) pci_disable_msi(ar_pci->pdev); } - ath10k_warn("unknown irq configuration upon deinit\n"); + ath10k_warn(ar, "unknown irq configuration upon deinit\n"); return -EINVAL; } @@ -2480,23 +2292,17 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long timeout; - int ret; u32 val; - ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); - - ret = ath10k_pci_wake(ar); - if (ret) { - ath10k_err("failed to wake up target for init: %d\n", ret); - return ret; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); do { val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); - ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", + val); /* target should never return this */ if (val == 0xffffffff) @@ -2511,55 +2317,42 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) if (ar_pci->num_msi_intrs == 0) /* Fix potential race by repeating CORE_BASE writes */ - ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS, - PCIE_INTR_FIRMWARE_MASK | - PCIE_INTR_CE_MASK_ALL); + ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + + PCIE_INTR_ENABLE_ADDRESS, + PCIE_INTR_FIRMWARE_MASK | + PCIE_INTR_CE_MASK_ALL); mdelay(10); } while (time_before(jiffies, timeout)); if (val == 0xffffffff) { - ath10k_err("failed to read device register, device is gone\n"); - ret = -EIO; - goto out; + ath10k_err(ar, "failed to read device register, device is gone\n"); + return -EIO; } if (val & FW_IND_EVENT_PENDING) { - ath10k_warn("device has crashed during init\n"); - ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, - val & ~FW_IND_EVENT_PENDING); - ath10k_pci_hif_dump_area(ar); - ret = -ECOMM; - goto out; + ath10k_warn(ar, "device has crashed 
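/* Fallback chain distilled from ath10k_pci_init_irq() above: try the
 * full multi-MSI vector count, then a single MSI, then shared legacy
 * interrupts. pci_enable_msi_range() returns the allocated count or a
 * negative error. Hypothetical helper:
 */
#include <linux/pci.h>

static int example_pick_irq_mode(struct pci_dev *pdev, int want)
{
	if (pci_enable_msi_range(pdev, want, want) == want)
		return want;		/* multi-vector MSI ("msi-x" mode) */

	if (pci_enable_msi(pdev) == 0)
		return 1;		/* single MSI */

	return 0;			/* fall back to legacy INTx */
}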
during init\n"); + ath10k_pci_fw_crashed_clear(ar); + ath10k_pci_fw_crashed_dump(ar); + return -ECOMM; } if (!(val & FW_IND_INITIALIZED)) { - ath10k_err("failed to receive initialized event from target: %08x\n", + ath10k_err(ar, "failed to receive initialized event from target: %08x\n", val); - ret = -ETIMEDOUT; - goto out; + return -ETIMEDOUT; } - ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n"); - -out: - ath10k_pci_sleep(ar); - return ret; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); + return 0; } static int ath10k_pci_cold_reset(struct ath10k *ar) { - int i, ret; + int i; u32 val; - ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n"); - - ret = ath10k_do_pci_wake(ar); - if (ret) { - ath10k_err("failed to wake up target: %d\n", - ret); - return ret; - } + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); /* Put Target, including PCIe, into RESET. */ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); @@ -2584,169 +2377,198 @@ static int ath10k_pci_cold_reset(struct ath10k *ar) msleep(1); } - ath10k_do_pci_sleep(ar); - - ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n"); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); return 0; } -static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci) -{ - int i; - - for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) { - if (!test_bit(i, ar_pci->features)) - continue; - - switch (i) { - case ATH10K_PCI_FEATURE_MSI_X: - ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n"); - break; - case ATH10K_PCI_FEATURE_SOC_POWER_SAVE: - ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n"); - break; - } - } -} - -static int ath10k_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *pci_dev) +static int ath10k_pci_claim(struct ath10k *ar) { - void __iomem *mem; - int ret = 0; - struct ath10k *ar; - struct ath10k_pci *ar_pci; - u32 lcr_val, chip_id; - - ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n"); - - ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL); - if (ar_pci == NULL) - return -ENOMEM; - - ar_pci->pdev = pdev; - ar_pci->dev = &pdev->dev; - - switch (pci_dev->device) { - case QCA988X_2_0_DEVICE_ID: - set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features); - break; - default: - ret = -ENODEV; - ath10k_err("Unknown device ID: %d\n", pci_dev->device); - goto err_ar_pci; - } - - if (ath10k_pci_target_ps) - set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features); - - ath10k_pci_dump_features(ar_pci); - - ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops); - if (!ar) { - ath10k_err("failed to create driver core\n"); - ret = -EINVAL; - goto err_ar_pci; - } - - ar_pci->ar = ar; - atomic_set(&ar_pci->keep_awake_count, 0); + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct pci_dev *pdev = ar_pci->pdev; + u32 lcr_val; + int ret; pci_set_drvdata(pdev, ar); ret = pci_enable_device(pdev); if (ret) { - ath10k_err("failed to enable PCI device: %d\n", ret); - goto err_ar; + ath10k_err(ar, "failed to enable pci device: %d\n", ret); + return ret; } - /* Request MMIO resources */ ret = pci_request_region(pdev, BAR_NUM, "ath"); if (ret) { - ath10k_err("failed to request MMIO region: %d\n", ret); + ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, + ret); goto err_device; } - /* - * Target structures have a limit of 32 bit DMA pointers. - * DMA pointers can be wider than 32 bits by default on some systems. - */ + /* Target expects 32 bit DMA. Enforce it. 
*/ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret); + ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); goto err_region; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { - ath10k_err("failed to set consistent DMA mask to 32-bit\n"); + ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n", + ret); goto err_region; } - /* Set bus master bit in PCI_COMMAND to enable DMA */ pci_set_master(pdev); - /* - * Temporary FIX: disable ASPM - * Will be removed after the OTP is programmed - */ + /* Workaround: Disable ASPM */ pci_read_config_dword(pdev, 0x80, &lcr_val); pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00)); /* Arrange for access to Target SoC registers. */ - mem = pci_iomap(pdev, BAR_NUM, 0); - if (!mem) { - ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM); + ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); + if (!ar_pci->mem) { + ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); ret = -EIO; goto err_master; } - ar_pci->mem = mem; + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); + return 0; + +err_master: + pci_clear_master(pdev); + +err_region: + pci_release_region(pdev, BAR_NUM); + +err_device: + pci_disable_device(pdev); + + return ret; +} + +static void ath10k_pci_release(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct pci_dev *pdev = ar_pci->pdev; + + pci_iounmap(pdev, ar_pci->mem); + pci_release_region(pdev, BAR_NUM); + pci_clear_master(pdev); + pci_disable_device(pdev); +} + +static int ath10k_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pci_dev) +{ + int ret = 0; + struct ath10k *ar; + struct ath10k_pci *ar_pci; + u32 chip_id; + + ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, + &ath10k_pci_hif_ops); + if (!ar) { + dev_err(&pdev->dev, "failed to allocate core\n"); + return -ENOMEM; + } + + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n"); + + ar_pci = ath10k_pci_priv(ar); + ar_pci->pdev = pdev; + ar_pci->dev = &pdev->dev; + ar_pci->ar = ar; + spin_lock_init(&ar_pci->ce_lock); + setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, + (unsigned long)ar); - ret = ath10k_do_pci_wake(ar); + ret = ath10k_pci_claim(ar); if (ret) { - ath10k_err("Failed to get chip id: %d\n", ret); - goto err_iomap; + ath10k_err(ar, "failed to claim device: %d\n", ret); + goto err_core_destroy; } - chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + ret = ath10k_pci_wake(ar); + if (ret) { + ath10k_err(ar, "failed to wake up: %d\n", ret); + goto err_release; + } - ath10k_do_pci_sleep(ar); + chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (chip_id == 0xffffffff) { + ath10k_err(ar, "failed to get chip id\n"); + ret = -ENODEV; + goto err_sleep; + } ret = ath10k_pci_alloc_ce(ar); if (ret) { - ath10k_err("failed to allocate copy engine pipes: %d\n", ret); - goto err_iomap; + ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", + ret); + goto err_sleep; } - ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); + ath10k_pci_ce_deinit(ar); - ret = ath10k_core_register(ar, chip_id); + ret = ath10k_ce_disable_interrupts(ar); + if (ret) { + ath10k_err(ar, "failed to disable copy engine interrupts: %d\n", + ret); + goto err_free_ce; + } + + /* Workaround: There's no known way to mask all possible interrupts via + * device CSR. The only way to make sure device doesn't assert + * interrupts is to reset it. Interrupts are then disabled on host + * after handlers are registered.
+ */ + ath10k_pci_warm_reset(ar); + + ret = ath10k_pci_init_irq(ar); if (ret) { - ath10k_err("failed to register driver core: %d\n", ret); + ath10k_err(ar, "failed to init irqs: %d\n", ret); goto err_free_ce; } + ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", + ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs, + ath10k_pci_irq_mode, ath10k_pci_reset_mode); + + ret = ath10k_pci_request_irq(ar); + if (ret) { + ath10k_warn(ar, "failed to request irqs: %d\n", ret); + goto err_deinit_irq; + } + + /* This shouldn't race as the device has been reset above. */ + ath10k_pci_irq_disable(ar); + + ret = ath10k_core_register(ar, chip_id); + if (ret) { + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_free_irq; + } + return 0; +err_free_irq: + ath10k_pci_free_irq(ar); + +err_deinit_irq: + ath10k_pci_deinit_irq(ar); + err_free_ce: ath10k_pci_free_ce(ar); -err_iomap: - pci_iounmap(pdev, mem); -err_master: - pci_clear_master(pdev); -err_region: - pci_release_region(pdev, BAR_NUM); -err_device: - pci_disable_device(pdev); -err_ar: + +err_sleep: + ath10k_pci_sleep(ar); + +err_release: + ath10k_pci_release(ar); + +err_core_destroy: ath10k_core_destroy(ar); -err_ar_pci: - /* call HIF PCI free here */ - kfree(ar_pci); return ret; } @@ -2756,7 +2578,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) struct ath10k *ar = pci_get_drvdata(pdev); struct ath10k_pci *ar_pci; - ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n"); + ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); if (!ar) return; @@ -2767,15 +2589,13 @@ static void ath10k_pci_remove(struct pci_dev *pdev) return; ath10k_core_unregister(ar); + ath10k_pci_free_irq(ar); + ath10k_pci_deinit_irq(ar); + ath10k_pci_ce_deinit(ar); ath10k_pci_free_ce(ar); - - pci_iounmap(pdev, ar_pci->mem); - pci_release_region(pdev, BAR_NUM); - pci_clear_master(pdev); - pci_disable_device(pdev); - + ath10k_pci_sleep(ar); + ath10k_pci_release(ar); ath10k_core_destroy(ar); - kfree(ar_pci); } MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); @@ -2793,7 +2613,8 @@ static int __init ath10k_pci_init(void) ret = pci_register_driver(&ath10k_pci_driver); if (ret) - ath10k_err("failed to register PCI driver: %d\n", ret); + printk(KERN_ERR "failed to register ath10k pci driver: %d\n", + ret); return ret; } @@ -2809,5 +2630,5 @@ module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE); +MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 940129209990..cf36511c7f4d 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -23,9 +23,6 @@ #include "hw.h" #include "ce.h" -/* FW dump area */ -#define REG_DUMP_COUNT_QCA988X 60 - /* * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite */ @@ -103,12 +100,12 @@ struct pcie_state { * NOTE: Structure is shared between Host software and Target firmware! 
*/ struct ce_pipe_config { - u32 pipenum; - u32 pipedir; - u32 nentries; - u32 nbytes_max; - u32 flags; - u32 reserved; + __le32 pipenum; + __le32 pipedir; + __le32 nentries; + __le32 nbytes_max; + __le32 flags; + __le32 reserved; }; /* @@ -130,17 +127,9 @@ struct ce_pipe_config { /* Establish a mapping between a service/direction and a pipe. */ struct service_to_pipe { - u32 service_id; - u32 pipedir; - u32 pipenum; -}; - -enum ath10k_pci_features { - ATH10K_PCI_FEATURE_MSI_X = 0, - ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 1, - - /* keep last */ - ATH10K_PCI_FEATURE_COUNT + __le32 service_id; + __le32 pipedir; + __le32 pipenum; }; /* Per-pipe state. */ @@ -169,8 +158,6 @@ struct ath10k_pci { struct ath10k *ar; void __iomem *mem; - DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT); - /* * Number of MSI interrupts granted, 0 --> using legacy PCI line * interrupts. @@ -179,12 +166,6 @@ struct ath10k_pci { struct tasklet_struct intr_tq; struct tasklet_struct msi_fw_err; - struct tasklet_struct early_irq_tasklet; - - int started; - - atomic_t keep_awake_count; - bool verified_awake; struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX]; @@ -198,27 +179,15 @@ struct ath10k_pci { /* Map CE id to ce_state */ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + struct timer_list rx_post_retry; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) { - return ar->hif.priv; -} - -static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); -} - -static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) -{ - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - - iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); + return (struct ath10k_pci *)ar->drv_priv; } +#define ATH10K_PCI_RX_POST_RETRY_MS 50 #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ #define PCIE_WAKE_TIMEOUT 5000 /* 5ms */ @@ -242,35 +211,17 @@ static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */ #define DIAG_ACCESS_CE_TIMEOUT_MS 10 -/* - * This API allows the Host to access Target registers directly - * and relatively efficiently over PCIe. - * This allows the Host to avoid extra overhead associated with - * sending a message to firmware and waiting for a response message - * from firmware, as is done on other interconnects. - * - * Yet there is some complexity with direct accesses because the - * Target's power state is not known a priori. The Host must issue - * special PCIe reads/writes in order to explicitly wake the Target - * and to verify that it is awake and will remain awake. - * - * Usage: +/* Target exposes its registers for direct access. However before host can + * access them it needs to make sure the target is awake (ath10k_pci_wake, + * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go + * to sleep unless host tells it to (ath10k_pci_sleep). * - * Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space. - * These calls must be bracketed by ath10k_pci_wake and - * ath10k_pci_sleep. A single BEGIN/END pair is adequate for - * multiple READ/WRITE operations. + * If host tries to access target registers without waking it up it can + * scribble over host memory. * - * Use ath10k_pci_wake to put the Target in a state in - * which it is legal for the Host to directly access it. 
This - * may involve waking the Target from a low power state, which - * may take up to 2Ms! - * - * Use ath10k_pci_sleep to tell the Target that as far as - * this code path is concerned, it no longer needs to remain - * directly accessible. BEGIN/END is under a reference counter; - * multiple code paths may issue BEGIN/END on a single targid. + * If the target is asleep, waking it up may take up to 2 ms. */ + static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) { @@ -296,25 +247,18 @@ static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); } -int ath10k_do_pci_wake(struct ath10k *ar); -void ath10k_do_pci_sleep(struct ath10k *ar); - -static inline int ath10k_pci_wake(struct ath10k *ar) +static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - return ath10k_do_pci_wake(ar); - - return 0; + return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); } -static inline void ath10k_pci_sleep(struct ath10k *ar) +static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features)) - ath10k_do_pci_sleep(ar); + iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr); } #endif /* _PCI_H_ */
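For illustration only (not part of this patch): a minimal sketch of the wake/access/sleep bracket the rewritten comment above describes, assuming the ath10k_pci_wake()/ath10k_pci_sleep() helpers from pci.c and the ath10k_pci_read32() accessor above; the wrapper name is hypothetical.

static int ath10k_pci_read_fw_indicator(struct ath10k *ar, u32 *val)
{
	int ret;

	/* Wake the target first; this may block for up to ~2 ms. */
	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	/* Register access is only safe while the target is awake. */
	*val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

	/* Let the target doze again once this code path is done. */
	ath10k_pci_sleep(ar);

	return 0;
}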
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c new file mode 100644 index 000000000000..3e1454b74e00 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -0,0 +1,561 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/relay.h> +#include "core.h" +#include "debug.h" + +static void send_fft_sample(struct ath10k *ar, + const struct fft_sample_tlv *fft_sample_tlv) +{ + int length; + + if (!ar->spectral.rfs_chan_spec_scan) + return; + + length = __be16_to_cpu(fft_sample_tlv->length) + + sizeof(*fft_sample_tlv); + relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length); +} + +static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len, + u8 *data) +{ + int dc_pos; + u8 max_exp; + + dc_pos = bin_len / 2; + + /* peak index outside of bins */ + if (dc_pos < max_index || -dc_pos >= max_index) + return 0; + + for (max_exp = 0; max_exp < 8; max_exp++) { + if (data[dc_pos + max_index] == (max_magnitude >> max_exp)) + break; + } + + /* max_exp not found */ + if (data[dc_pos + max_index] != (max_magnitude >> max_exp)) + return 0; + + return max_exp; +}
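A worked example may help here (illustration only, not part of the patch): get_max_exp() recovers how many low-order bits the hardware dropped when it squeezed the 16-bit peak magnitude into an 8-bit bin, by testing which right-shift of max_magnitude reproduces the stored bin value. A hypothetical caller:

static void get_max_exp_example(void)
{
	u8 bins[64] = { 0 };

	/* 64 bins, so the DC bin sits at index 32; store the peak at
	 * max_index = 5 with the magnitude truncated by two bits:
	 * 0x1a0 >> 2 == 0x68.
	 */
	bins[32 + 5] = 0x68;

	/* returns 2, since bins[32 + 5] == (0x1a0 >> 2) */
	get_max_exp(5, 0x1a0, sizeof(bins), bins);
}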
+ +int ath10k_spectral_process_fft(struct ath10k *ar, + struct wmi_single_phyerr_rx_event *event, + struct phyerr_fft_report *fftr, + size_t bin_len, u64 tsf) +{ + struct fft_sample_ath10k *fft_sample; + u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS]; + u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag; + u32 reg0, reg1, nf_list1, nf_list2; + u8 chain_idx, *bins; + int dc_pos; + + fft_sample = (struct fft_sample_ath10k *)&buf; + + if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS) + return -EINVAL; + + reg0 = __le32_to_cpu(fftr->reg0); + reg1 = __le32_to_cpu(fftr->reg1); + + length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len; + fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K; + fft_sample->tlv.length = __cpu_to_be16(length); + + /* TODO: there might be a reason why the hardware reports 20/40/80 MHz, + * but the results/plots suggest that it's actually 22/44/88 MHz. + */ + switch (event->hdr.chan_width_mhz) { + case 20: + fft_sample->chan_width_mhz = 22; + break; + case 40: + fft_sample->chan_width_mhz = 44; + break; + case 80: + /* TODO: As experiments with an analogue sender and various + * configurations (FFT sizes of 64/128/256 and 20/40/80 MHz) + * show, the particular configuration of 80 MHz/64 bins does + * not match with the other samples at all. Until the reason + * for that is found, don't report these samples. + */ + if (bin_len == 64) + return -EINVAL; + fft_sample->chan_width_mhz = 88; + break; + default: + fft_sample->chan_width_mhz = event->hdr.chan_width_mhz; + } + + fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB); + fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB); + + peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG); + fft_sample->max_magnitude = __cpu_to_be16(peak_mag); + fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX); + fft_sample->rssi = event->hdr.rssi_combined; + + total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB); + base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB); + fft_sample->total_gain_db = __cpu_to_be16(total_gain_db); + fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db); + + freq1 = __le16_to_cpu(event->hdr.freq1); + freq2 = __le16_to_cpu(event->hdr.freq2); + fft_sample->freq1 = __cpu_to_be16(freq1); + fft_sample->freq2 = __cpu_to_be16(freq2); + + nf_list1 = __le32_to_cpu(event->hdr.nf_list_1); + nf_list2 = __le32_to_cpu(event->hdr.nf_list_2); + chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX); + + switch (chain_idx) { + case 0: + fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu); + break; + case 1: + fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu); + break; + case 2: + fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu); + break; + case 3: + fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu); + break; + } + + bins = (u8 *)fftr; + bins += sizeof(*fftr); + + fft_sample->tsf = __cpu_to_be64(tsf); + + /* max_exp has been directly reported by previous hardware (ath9k), + * maybe it's possible to get it by other means? + */ + fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag, + bin_len, bins); + + memcpy(fft_sample->data, bins, bin_len); + + /* DC value (value in the middle) is the blind spot of the spectral + * sample and is invalid, so interpolate it. + */ + dc_pos = bin_len / 2; + fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] + + fft_sample->data[dc_pos - 1]) / 2; + + send_fft_sample(ar, &fft_sample->tlv); + + return 0; +} + +static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + + lockdep_assert_held(&ar->conf_mutex); + + if (list_empty(&ar->arvifs)) + return NULL; + + /* if there already is a vif doing spectral, return that. */ + list_for_each_entry(arvif, &ar->arvifs, list) + if (arvif->spectral_enabled) + return arvif; + + /* otherwise, return the first vif.
*/ + return list_first_entry(&ar->arvifs, typeof(*arvif), list); +} + +static int ath10k_spectral_scan_trigger(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + int res; + int vdev_id; + + lockdep_assert_held(&ar->conf_mutex); + + arvif = ath10k_get_spectral_vdev(ar); + if (!arvif) + return -ENODEV; + vdev_id = arvif->vdev_id; + + if (ar->spectral.mode == SPECTRAL_DISABLED) + return 0; + + res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, + WMI_SPECTRAL_TRIGGER_CMD_CLEAR, + WMI_SPECTRAL_ENABLE_CMD_ENABLE); + if (res < 0) + return res; + + res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, + WMI_SPECTRAL_TRIGGER_CMD_TRIGGER, + WMI_SPECTRAL_ENABLE_CMD_ENABLE); + if (res < 0) + return res; + + return 0; +} + +static int ath10k_spectral_scan_config(struct ath10k *ar, + enum ath10k_spectral_mode mode) +{ + struct wmi_vdev_spectral_conf_arg arg; + struct ath10k_vif *arvif; + int vdev_id, count, res = 0; + + lockdep_assert_held(&ar->conf_mutex); + + arvif = ath10k_get_spectral_vdev(ar); + if (!arvif) + return -ENODEV; + + vdev_id = arvif->vdev_id; + + arvif->spectral_enabled = (mode != SPECTRAL_DISABLED); + ar->spectral.mode = mode; + + res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id, + WMI_SPECTRAL_TRIGGER_CMD_CLEAR, + WMI_SPECTRAL_ENABLE_CMD_DISABLE); + if (res < 0) { + ath10k_warn(ar, "failed to enable spectral scan: %d\n", res); + return res; + } + + if (mode == SPECTRAL_DISABLED) + return 0; + + if (mode == SPECTRAL_BACKGROUND) + count = WMI_SPECTRAL_COUNT_DEFAULT; + else + count = max_t(u8, 1, ar->spectral.config.count); + + arg.vdev_id = vdev_id; + arg.scan_count = count; + arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT; + arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT; + arg.scan_fft_size = ar->spectral.config.fft_size; + arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT; + arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT; + arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT; + arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT; + arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT; + arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT; + arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT; + arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT; + arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT; + arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT; + arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT; + arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT; + arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT; + arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT; + + res = ath10k_wmi_vdev_spectral_conf(ar, &arg); + if (res < 0) { + ath10k_warn(ar, "failed to configure spectral scan: %d\n", res); + return res; + } + + return 0; +} + +static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char *mode = ""; + unsigned int len; + enum ath10k_spectral_mode spectral_mode; + + mutex_lock(&ar->conf_mutex); + spectral_mode = ar->spectral.mode; + mutex_unlock(&ar->conf_mutex); + + switch (spectral_mode) { + case SPECTRAL_DISABLED: + mode = "disable"; + break; + case SPECTRAL_BACKGROUND: + mode = "background"; + break; + case SPECTRAL_MANUAL: + mode = "manual"; + break; + } + + len = strlen(mode); + return simple_read_from_buffer(user_buf, count, ppos, mode, len); +} + +static ssize_t write_file_spec_scan_ctl(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + ssize_t 
len; + int res; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + mutex_lock(&ar->conf_mutex); + + if (strncmp("trigger", buf, 7) == 0) { + if (ar->spectral.mode == SPECTRAL_MANUAL || + ar->spectral.mode == SPECTRAL_BACKGROUND) { + /* reset the configuration to adopt possibly changed + * debugfs parameters + */ + res = ath10k_spectral_scan_config(ar, + ar->spectral.mode); + if (res < 0) { + ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n", + res); + } + res = ath10k_spectral_scan_trigger(ar); + if (res < 0) { + ath10k_warn(ar, "failed to trigger spectral scan: %d\n", + res); + } + } else { + res = -EINVAL; + } + } else if (strncmp("background", buf, 9) == 0) { + res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND); + } else if (strncmp("manual", buf, 6) == 0) { + res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL); + } else if (strncmp("disable", buf, 7) == 0) { + res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED); + } else { + res = -EINVAL; + } + + mutex_unlock(&ar->conf_mutex); + + if (res < 0) + return res; + + return count; +} + +static const struct file_operations fops_spec_scan_ctl = { + .read = read_file_spec_scan_ctl, + .write = write_file_spec_scan_ctl, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_count(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + unsigned int len; + u8 spectral_count; + + mutex_lock(&ar->conf_mutex); + spectral_count = ar->spectral.config.count; + mutex_unlock(&ar->conf_mutex); + + len = sprintf(buf, "%d\n", spectral_count); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_count(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val > 255) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + ar->spectral.config.count = val; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_spectral_count = { + .read = read_file_spectral_count, + .write = write_file_spectral_count, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t read_file_spectral_bins(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + char buf[32]; + unsigned int len, bins, fft_size, bin_scale; + + mutex_lock(&ar->conf_mutex); + + fft_size = ar->spectral.config.fft_size; + bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT; + bins = 1 << (fft_size - bin_scale); + + mutex_unlock(&ar->conf_mutex); + + len = sprintf(buf, "%d\n", bins); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t write_file_spectral_bins(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath10k *ar = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < 64 || val >
SPECTRAL_ATH10K_MAX_NUM_BINS) + return -EINVAL; + + if (!is_power_of_2(val)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + ar->spectral.config.fft_size = ilog2(val); + ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_spectral_bins = { + .read = read_file_spectral_bins, + .write = write_file_spectral_bins, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static struct dentry *create_buf_file_handler(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + struct dentry *buf_file; + + buf_file = debugfs_create_file(filename, mode, parent, buf, + &relay_file_operations); + *is_global = 1; + return buf_file; +} + +static int remove_buf_file_handler(struct dentry *dentry) +{ + debugfs_remove(dentry); + + return 0; +} + +static struct rchan_callbacks rfs_spec_scan_cb = { + .create_buf_file = create_buf_file_handler, + .remove_buf_file = remove_buf_file_handler, +}; + +int ath10k_spectral_start(struct ath10k *ar) +{ + struct ath10k_vif *arvif; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) + arvif->spectral_enabled = 0; + + ar->spectral.mode = SPECTRAL_DISABLED; + ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT; + ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT; + + return 0; +} + +int ath10k_spectral_vif_stop(struct ath10k_vif *arvif) +{ + if (!arvif->spectral_enabled) + return 0; + + return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED); +} + +int ath10k_spectral_create(struct ath10k *ar) +{ + ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan", + ar->debug.debugfs_phy, + 1024, 256, + &rfs_spec_scan_cb, NULL); + debugfs_create_file("spectral_scan_ctl", + S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, + &fops_spec_scan_ctl); + debugfs_create_file("spectral_count", + S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, + &fops_spectral_count); + debugfs_create_file("spectral_bins", + S_IRUSR | S_IWUSR, + ar->debug.debugfs_phy, ar, + &fops_spectral_bins); + + return 0; +} + +void ath10k_spectral_destroy(struct ath10k *ar) +{ + if (ar->spectral.rfs_chan_spec_scan) { + relay_close(ar->spectral.rfs_chan_spec_scan); + ar->spectral.rfs_chan_spec_scan = NULL; + } +}
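For illustration only (not part of this patch): the debugfs files created above, together with the relayfs stream, are the whole userspace interface to spectral scan. A hypothetical userspace sketch of a manual scan, assuming the default debugfs mount and a device on phy0 (the relay buffer appears as spectral_scan0 next to these files):

#include <stdio.h>

/* Write a value to one of the ath10k spectral debugfs knobs. */
static int spectral_write(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/ieee80211/phy0/ath10k/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	spectral_write("spectral_bins", "128");		/* 128 bins per sample */
	spectral_write("spectral_scan_ctl", "manual");	/* arm manual mode */
	spectral_write("spectral_scan_ctl", "trigger");	/* take samples */
	/* samples can now be read from the spectral_scan0 relay file */
	return 0;
}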
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h new file mode 100644 index 000000000000..ddc57c557272 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/spectral.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef SPECTRAL_H +#define SPECTRAL_H + +#include "../spectral_common.h" + +/** + * struct ath10k_spec_scan - parameters for Atheros spectral scan + * + * @count: number of scan results requested for manual mode + * @fft_size: number of bins to be requested = 2^(fft_size - bin_scale) + */ +struct ath10k_spec_scan { + u8 count; + u8 fft_size; +}; + +/* enum ath10k_spectral_mode: + * + * @SPECTRAL_DISABLED: spectral mode is disabled + * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with + * something else. + * @SPECTRAL_MANUAL: spectral scan is enabled; triggering for samples + * is performed manually. + */ +enum ath10k_spectral_mode { + SPECTRAL_DISABLED = 0, + SPECTRAL_BACKGROUND, + SPECTRAL_MANUAL, +}; + +#ifdef CONFIG_ATH10K_DEBUGFS + +int ath10k_spectral_process_fft(struct ath10k *ar, + struct wmi_single_phyerr_rx_event *event, + struct phyerr_fft_report *fftr, + size_t bin_len, u64 tsf); +int ath10k_spectral_start(struct ath10k *ar); +int ath10k_spectral_vif_stop(struct ath10k_vif *arvif); +int ath10k_spectral_create(struct ath10k *ar); +void ath10k_spectral_destroy(struct ath10k *ar); + +#else + +static inline int +ath10k_spectral_process_fft(struct ath10k *ar, + struct wmi_single_phyerr_rx_event *event, + struct phyerr_fft_report *fftr, + size_t bin_len, u64 tsf) +{ + return 0; +} + +static inline int ath10k_spectral_start(struct ath10k *ar) +{ + return 0; +} + +static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif) +{ + return 0; +} + +static inline int ath10k_spectral_create(struct ath10k *ar) +{ + return 0; +} + +static inline void ath10k_spectral_destroy(struct ath10k *ar) +{ +} + +#endif /* CONFIG_ATH10K_DEBUGFS */ + +#endif /* SPECTRAL_H */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index f4fa22d1d591..2eeec8a63d5c 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -32,14 +32,14 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb) * offchan_tx_skb.
*/ spin_lock_bh(&ar->data_lock); if (ar->offchan_tx_skb != skb) { - ath10k_warn("completed old offchannel frame\n"); + ath10k_warn(ar, "completed old offchannel frame\n"); goto out; } complete(&ar->offchan_tx_completed); ar->offchan_tx_skb = NULL; /* just for sanity */ - ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); + ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb); out: spin_unlock_bh(&ar->data_lock); } @@ -47,18 +47,19 @@ out: void ath10k_txrx_tx_unref(struct ath10k_htt *htt, const struct htt_tx_done *tx_done) { - struct device *dev = htt->ar->dev; + struct ath10k *ar = htt->ar; + struct device *dev = ar->dev; struct ieee80211_tx_info *info; struct ath10k_skb_cb *skb_cb; struct sk_buff *msdu; lockdep_assert_held(&htt->tx_lock); - ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n", tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack); if (tx_done->msdu_id >= htt->max_num_pending_tx) { - ath10k_warn("warning: msdu_id %d too big, ignoring\n", + ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n", tx_done->msdu_id); return; } @@ -182,7 +183,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt, wake_up(&ar->peer_mapping_wq); } - ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", ev->vdev_id, ev->addr, ev->peer_id); set_bit(ev->peer_id, peer->peer_ids); @@ -199,12 +200,12 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt, spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, ev->peer_id); if (!peer) { - ath10k_warn("peer-unmap-event: unknown peer id %d\n", + ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n", ev->peer_id); goto exit; } - ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n", peer->vdev_id, peer->addr, ev->peer_id); clear_bit(ev->peer_id, peer->peer_ids); diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index c2c87c916b5a..e500a3cc905e 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -487,6 +487,127 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE, }; +/* firmware 10.2 specific mappings */ +static struct wmi_cmd_map wmi_10_2_cmd_map = { + .init_cmdid = WMI_10_2_INIT_CMDID, + .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID, + .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID, + .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID, + .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, + .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID, + .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID, + .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID, + .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID, + .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID, + .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID, + .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID, + .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID, + .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID, + .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID, + .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID, + 
.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID, + .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID, + .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID, + .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID, + .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID, + .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID, + .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID, + .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID, + .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID, + .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID, + .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID, + .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID, + .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID, + .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID, + .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID, + .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID, + .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID, + .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID, + .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID, + .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID, + .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID, + .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID, + .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED, + .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID, + .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID, + .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID, + .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID, + .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID, + .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID, + .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID, + .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID, + .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID, + .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID, + .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID, + .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE, + .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD, + .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD, + .roam_scan_rssi_change_threshold = + WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE, + .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE, + .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE, + .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD, + .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO, + .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY, + .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE, + .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE, + .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED, + .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID, + .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED, + .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID, + .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID, + .wlan_profile_set_hist_intvl_cmdid = + WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + .wlan_profile_get_profile_data_cmdid = + WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + .wlan_profile_enable_profile_id_cmdid = + WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + .wlan_profile_list_profile_id_cmdid = + WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID, + .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID, + .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID, + .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID, + 
.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID, + .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID, + .wow_enable_disable_wake_event_cmdid = + WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID, + .wow_hostwakeup_from_sleep_cmdid = + WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID, + .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID, + .vdev_spectral_scan_configure_cmdid = + WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + .vdev_spectral_scan_enable_cmdid = + WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID, + .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED, + .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED, + .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED, + .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED, + .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED, + .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED, + .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED, + .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED, + .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED, + .echo_cmdid = WMI_10_2_ECHO_CMDID, + .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID, + .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID, + .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID, + .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED, + .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED, + .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID, + .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID, +}; + int ath10k_wmi_wait_for_service_ready(struct ath10k *ar) { int ret; @@ -503,18 +624,18 @@ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar) return ret; } -static struct sk_buff *ath10k_wmi_alloc_skb(u32 len) +static struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len) { struct sk_buff *skb; u32 round_len = roundup(len, 4); - skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len); + skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len); if (!skb) return NULL; skb_reserve(skb, WMI_SKB_HEADROOM); if (!IS_ALIGNED((unsigned long)skb->data, 4)) - ath10k_warn("Unaligned WMI skb\n"); + ath10k_warn(ar, "Unaligned WMI skb\n"); skb_put(skb, round_len); memset(skb->data, 0, round_len); @@ -612,7 +733,7 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, might_sleep(); if (cmd_id == WMI_CMD_UNSUPPORTED) { - ath10k_warn("wmi command %d is not supported by firmware\n", + ath10k_warn(ar, "wmi command %d is not supported by firmware\n", cmd_id); return ret; } @@ -660,7 +781,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) len = round_up(len, 4); - wmi_skb = ath10k_wmi_alloc_skb(len); + wmi_skb = ath10k_wmi_alloc_skb(ar, len); if (!wmi_skb) return -ENOMEM; @@ -674,7 +795,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN); memcpy(cmd->buf, skb->data, skb->len); - ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n", wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); @@ -690,6 +811,130 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb) return ret; } +static void ath10k_wmi_event_scan_started(struct ath10k *ar) +{ + 
lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_STARTING: + ar->scan.state = ATH10K_SCAN_RUNNING; + + if (ar->scan.is_roc) + ieee80211_ready_on_channel(ar->hw); + + complete(&ar->scan.started); + break; + } +} + +static void ath10k_wmi_event_scan_completed(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + /* One suspected reason scan can be completed while starting is + * if firmware fails to deliver all scan events to the host, + * e.g. when transport pipe is full. This has been observed + * with spectral scan phyerr events starving wmi transport + * pipe. In such case the "scan completed" event should be (and + * is) ignored by the host as it may be just firmware's scan + * state machine recovering. + */ + ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + __ath10k_scan_finish(ar); + break; + } +} + +static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ar->scan_channel = NULL; + break; + } +} + +static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq) +{ + lockdep_assert_held(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n", + ath10k_scan_state_str(ar->scan.state), + ar->scan.state); + break; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); + + if (ar->scan.is_roc && ar->scan.roc_freq == freq) + complete(&ar->scan.on_channel); + break; + } +} + +static const char * +ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type, + enum wmi_scan_completion_reason reason) +{ + switch (type) { + case WMI_SCAN_EVENT_STARTED: + return "started"; + case WMI_SCAN_EVENT_COMPLETED: + switch (reason) { + case WMI_SCAN_REASON_COMPLETED: + return "completed"; + case WMI_SCAN_REASON_CANCELLED: + return "completed [cancelled]"; + case WMI_SCAN_REASON_PREEMPTED: + return "completed [preempted]"; + case WMI_SCAN_REASON_TIMEDOUT: + return "completed [timedout]"; + case WMI_SCAN_REASON_MAX: + break; + } + return "completed [unknown]"; + case WMI_SCAN_EVENT_BSS_CHANNEL: + return "bss channel"; + case WMI_SCAN_EVENT_FOREIGN_CHANNEL: + return "foreign channel"; + case WMI_SCAN_EVENT_DEQUEUED: + return "dequeued"; + case WMI_SCAN_EVENT_PREEMPTED: + return "preempted"; + case WMI_SCAN_EVENT_START_FAILED: + return "start failed"; + default: + return "unknown"; + } +} + static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) { struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data; @@ -707,81 +952,32 @@ static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb) scan_id = 
__le32_to_cpu(event->scan_id); vdev_id = __le32_to_cpu(event->vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n"); - ath10k_dbg(ATH10K_DBG_WMI, - "scan event type %d reason %d freq %d req_id %d " - "scan_id %d vdev_id %d\n", - event_type, reason, freq, req_id, scan_id, vdev_id); - spin_lock_bh(&ar->data_lock); + ath10k_dbg(ar, ATH10K_DBG_WMI, + "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n", + ath10k_wmi_event_scan_type_str(event_type, reason), + event_type, reason, freq, req_id, scan_id, vdev_id, + ath10k_scan_state_str(ar->scan.state), ar->scan.state); + switch (event_type) { case WMI_SCAN_EVENT_STARTED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n"); - if (ar->scan.in_progress && ar->scan.is_roc) - ieee80211_ready_on_channel(ar->hw); - - complete(&ar->scan.started); + ath10k_wmi_event_scan_started(ar); break; case WMI_SCAN_EVENT_COMPLETED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n"); - switch (reason) { - case WMI_SCAN_REASON_COMPLETED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n"); - break; - case WMI_SCAN_REASON_CANCELLED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n"); - break; - case WMI_SCAN_REASON_PREEMPTED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n"); - break; - case WMI_SCAN_REASON_TIMEDOUT: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n"); - break; - default: - break; - } - - ar->scan_channel = NULL; - if (!ar->scan.in_progress) { - ath10k_warn("no scan requested, ignoring\n"); - break; - } - - if (ar->scan.is_roc) { - ath10k_offchan_tx_purge(ar); - - if (!ar->scan.aborting) - ieee80211_remain_on_channel_expired(ar->hw); - } else { - ieee80211_scan_completed(ar->hw, ar->scan.aborting); - } - - del_timer(&ar->scan.timeout); - complete_all(&ar->scan.completed); - ar->scan.in_progress = false; + ath10k_wmi_event_scan_completed(ar); break; case WMI_SCAN_EVENT_BSS_CHANNEL: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n"); - ar->scan_channel = NULL; + ath10k_wmi_event_scan_bss_chan(ar); break; case WMI_SCAN_EVENT_FOREIGN_CHANNEL: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n"); - ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq); - if (ar->scan.in_progress && ar->scan.is_roc && - ar->scan.roc_freq == freq) { - complete(&ar->scan.on_channel); - } - break; - case WMI_SCAN_EVENT_DEQUEUED: - ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n"); - break; - case WMI_SCAN_EVENT_PREEMPTED: - ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n"); + ath10k_wmi_event_scan_foreign_chan(ar, freq); break; case WMI_SCAN_EVENT_START_FAILED: - ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n"); + ath10k_warn(ar, "received scan start failure event\n"); break; + case WMI_SCAN_EVENT_DEQUEUED: + case WMI_SCAN_EVENT_PREEMPTED: default: break; } @@ -911,7 +1107,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) memset(status, 0, sizeof(*status)); - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx status %08x\n", rx_status); if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) { @@ -947,9 +1143,9 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ) - ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); + ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n"); } else { - ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n"); + ath10k_warn(ar, "using (unreliable) phy_mode to 
extract band for mgmt rx\n"); status->band = phy_mode_to_band(phy_mode); } @@ -979,12 +1175,12 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) } } - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx skb %p len %d ftype %02x stype %02x\n", skb, skb->len, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "event mgmt rx freq %d band %d snr %d, rate_idx %d\n", status->freq, status->band, status->signal, status->rate_idx); @@ -1034,21 +1230,26 @@ static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) rx_clear_count = __le32_to_cpu(ev->rx_clear_count); cycle_count = __le32_to_cpu(ev->cycle_count); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count); spin_lock_bh(&ar->data_lock); - if (!ar->scan.in_progress) { - ath10k_warn("chan info event without a scan request?\n"); + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_warn(ar, "received chan info event without a scan request, ignoring\n"); goto exit; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + break; } idx = freq_to_idx(ar, freq); if (idx >= ARRAY_SIZE(ar->survey)) { - ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n", + ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n", freq, idx); goto exit; } @@ -1079,12 +1280,12 @@ exit: static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n"); } static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n", skb->len); trace_ath10k_wmi_dbglog(skb->data, skb->len); @@ -1097,7 +1298,7 @@ static void ath10k_wmi_event_update_stats(struct ath10k *ar, { struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data; - ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n"); ath10k_debug_read_target_stats(ar, ev); } @@ -1107,7 +1308,7 @@ static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, { struct wmi_vdev_start_response_event *ev; - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n"); ev = (struct wmi_vdev_start_response_event *)skb->data; @@ -1120,7 +1321,7 @@ static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n"); complete(&ar->vdev_setup_done); } @@ -1132,14 +1333,14 @@ static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, ev = (struct wmi_peer_sta_kickout_event *)skb->data; - ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n", ev->peer_macaddr.addr); rcu_read_lock(); sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL); if (!sta) { - ath10k_warn("Spurious quick kickout for STA %pM\n", + ath10k_warn(ar, "Spurious quick kickout for STA %pM\n", 
ev->peer_macaddr.addr); goto exit; } @@ -1216,7 +1417,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, (u8 *)skb_tail_pointer(bcn) - ies); if (!ie) { if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS) - ath10k_warn("no tim ie found;\n"); + ath10k_warn(ar, "no tim ie found;\n"); return; } @@ -1236,12 +1437,12 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, ie_len += expand_size; pvm_len += expand_size; } else { - ath10k_warn("tim expansion failed\n"); + ath10k_warn(ar, "tim expansion failed\n"); } } if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) { - ath10k_warn("tim pvm length is too great (%d)\n", pvm_len); + ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len); return; } @@ -1255,7 +1456,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar, ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true; } - ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", + ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n", tim->dtim_count, tim->dtim_period, tim->bitmap_ctrl, pvm_len); } @@ -1333,7 +1534,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) return; - ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); + ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) { new_len = ath10k_p2p_calc_noa_ie_len(noa); if (!new_len) @@ -1381,7 +1582,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) ev = (struct wmi_host_swba_event *)skb->data; map = __le32_to_cpu(ev->vdev_map); - ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", + ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n", ev->vdev_map); for (; map; map >>= 1, vdev_id++) { @@ -1391,13 +1592,13 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) i++; if (i >= WMI_MAX_AP_VDEV) { - ath10k_warn("swba has corrupted vdev map\n"); + ath10k_warn(ar, "swba has corrupted vdev map\n"); break; } bcn_info = &ev->bcn_info[i]; - ath10k_dbg(ATH10K_DBG_MGMT, + ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n", i, __le32_to_cpu(bcn_info->tim_info.tim_len), @@ -1411,7 +1612,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) arvif = ath10k_get_arvif(ar, vdev_id); if (arvif == NULL) { - ath10k_warn("no vif for vdev_id %d found\n", vdev_id); + ath10k_warn(ar, "no vif for vdev_id %d found\n", + vdev_id); continue; } @@ -1428,7 +1630,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) bcn = ieee80211_beacon_get(ar->hw, arvif->vif); if (!bcn) { - ath10k_warn("could not get mac80211 beacon\n"); + ath10k_warn(ar, "could not get mac80211 beacon\n"); continue; } @@ -1440,7 +1642,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) if (arvif->beacon) { if (!arvif->beacon_sent) - ath10k_warn("SWBA overrun on vdev %d\n", + ath10k_warn(ar, "SWBA overrun on vdev %d\n", arvif->vdev_id); dma_unmap_single(arvif->ar->dev, @@ -1456,7 +1658,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb) ret = dma_mapping_error(arvif->ar->dev, ATH10K_SKB_CB(bcn)->paddr); if (ret) { - ath10k_warn("failed to map beacon: %d\n", ret); + ath10k_warn(ar, "failed to map beacon: %d\n", ret); dev_kfree_skb_any(bcn); goto skip; } @@ -1473,7 +1675,7 @@ skip: static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) 
{ - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); } static void ath10k_dfs_radar_report(struct ath10k *ar, @@ -1489,20 +1691,20 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, reg0 = __le32_to_cpu(rr->reg0); reg1 = __le32_to_cpu(rr->reg1); - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n", MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP), MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH), MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN), MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF)); - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n", MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK), MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX), MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID), MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN), MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK)); - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n", MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET), MS(reg1, RADAR_REPORT_REG1_PULSE_DUR)); @@ -1529,25 +1731,25 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, pe.width = width; pe.rssi = rssi; - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n", pe.freq, pe.width, pe.rssi, pe.ts); ATH10K_DFS_STAT_INC(ar, pulses_detected); if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs no pulse pattern detected, yet\n"); return; } - ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n"); + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n"); ATH10K_DFS_STAT_INC(ar, radar_detected); /* Control radar events reporting in debugfs file dfs_block_radar_events */ if (ar->dfs_block_radar_events) { - ath10k_info("DFS Radar detected, but ignored as requested\n"); + ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); return; } @@ -1566,13 +1768,13 @@ static int ath10k_dfs_fft_report(struct ath10k *ar, reg1 = __le32_to_cpu(fftr->reg1); rssi = event->hdr.rssi_combined; - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n", MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB), MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB), MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX), MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX)); - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n", MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB), MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB), @@ -1584,7 +1786,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar, /* false event detection */ if (rssi == DFS_RSSI_POSSIBLY_FALSE && peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) { - ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n"); ATH10K_DFS_STAT_INC(ar, pulses_discarded); return -EINVAL; } @@ -1603,7 +1805,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, u8 *tlv_buf; buf_len = __le32_to_cpu(event->hdr.buf_len); - 
ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n", event->hdr.phy_err_code, event->hdr.rssi_combined, __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len); @@ -1616,21 +1818,22 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, while (i < buf_len) { if (i + sizeof(*tlv) > buf_len) { - ath10k_warn("too short buf for tlv header (%d)\n", i); + ath10k_warn(ar, "too short buf for tlv header (%d)\n", + i); return; } tlv = (struct phyerr_tlv *)&event->bufp[i]; tlv_len = __le16_to_cpu(tlv->len); tlv_buf = &event->bufp[i + sizeof(*tlv)]; - ath10k_dbg(ATH10K_DBG_REGULATORY, + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n", tlv_len, tlv->tag, tlv->sig); switch (tlv->tag) { case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY: if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) { - ath10k_warn("too short radar pulse summary (%d)\n", + ath10k_warn(ar, "too short radar pulse summary (%d)\n", i); return; } @@ -1640,7 +1843,8 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar, break; case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) { - ath10k_warn("too short fft report (%d)\n", i); + ath10k_warn(ar, "too short fft report (%d)\n", + i); return; } @@ -1659,7 +1863,54 @@ static void ath10k_wmi_event_spectral_scan(struct ath10k *ar, struct wmi_single_phyerr_rx_event *event, u64 tsf) { - ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n"); + int buf_len, tlv_len, res, i = 0; + struct phyerr_tlv *tlv; + u8 *tlv_buf; + struct phyerr_fft_report *fftr; + size_t fftr_len; + + buf_len = __le32_to_cpu(event->hdr.buf_len); + + while (i < buf_len) { + if (i + sizeof(*tlv) > buf_len) { + ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n", + i); + return; + } + + tlv = (struct phyerr_tlv *)&event->bufp[i]; + tlv_len = __le16_to_cpu(tlv->len); + tlv_buf = &event->bufp[i + sizeof(*tlv)]; + + if (i + sizeof(*tlv) + tlv_len > buf_len) { + ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n", + i); + return; + } + + switch (tlv->tag) { + case PHYERR_TLV_TAG_SEARCH_FFT_REPORT: + if (sizeof(*fftr) > tlv_len) { + ath10k_warn(ar, "failed to parse fft report at byte %d\n", + i); + return; + } + + fftr_len = tlv_len - sizeof(*fftr); + fftr = (struct phyerr_fft_report *)tlv_buf; + res = ath10k_spectral_process_fft(ar, event, + fftr, fftr_len, + tsf); + if (res < 0) { + ath10k_warn(ar, "failed to process fft report: %d\n", + res); + return; + } + break; + } + + i += sizeof(*tlv) + tlv_len; + } } static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) @@ -1674,7 +1925,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) /* Check if combined event available */ if (left_len < sizeof(*comb_event)) { - ath10k_warn("wmi phyerr combined event wrong len\n"); + ath10k_warn(ar, "wmi phyerr combined event wrong len\n"); return; } @@ -1688,7 +1939,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) tsf <<= 32; tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event phyerr count %d tsf64 0x%llX\n", count, tsf); @@ -1696,7 +1947,8 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) for (i = 0; i < count; i++) { /* Check if we can read event header */ if (left_len < sizeof(*event)) { - ath10k_warn("single event (%d) wrong head len\n", i); + ath10k_warn(ar, "single event (%d) wrong head len\n", + i);
return; } @@ -1706,7 +1958,7 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) phy_err_code = event->hdr.phy_err_code; if (left_len < buf_len) { - ath10k_warn("single event (%d) wrong buf len\n", i); + ath10k_warn(ar, "single event (%d) wrong buf len\n", i); return; } @@ -1733,13 +1985,13 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n"); } static void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n"); } static void ath10k_wmi_event_debug_print(struct ath10k *ar, @@ -1764,7 +2016,7 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar, } if (i == sizeof(buf) - 1) - ath10k_warn("wmi debug print truncated: %d\n", skb->len); + ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len); /* for some reason the debug prints end with \n, remove that */ if (skb->data[i - 1] == '\n') @@ -1773,108 +2025,108 @@ static void ath10k_wmi_event_debug_print(struct ath10k *ar, /* the last byte is always reserved for the null character */ buf[i] = '\0'; - ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf); } static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n"); } static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n"); } static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n"); } static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n"); } static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n"); } static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n"); } static void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n"); } static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n"); } static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n"); } static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, 
struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n"); } static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n"); } static void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n"); } static void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n"); } static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n"); } static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n"); } static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n"); } static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb) { - ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n"); } static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, @@ -1894,7 +2146,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id, &paddr, GFP_ATOMIC); if (!ar->wmi.mem_chunks[idx].vaddr) { - ath10k_warn("failed to allocate memory chunk\n"); + ath10k_warn(ar, "failed to allocate memory chunk\n"); return -ENOMEM; } @@ -1912,9 +2164,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_service_ready_event *ev = (void *)skb->data; + DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {}; if (skb->len < sizeof(*ev)) { - ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", + ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
skb->len, sizeof(*ev)); return; } @@ -1937,7 +2190,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features); if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { - ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", + ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; } @@ -1945,8 +2198,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, ar->ath_common.regulatory.current_rd = __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); - ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, - sizeof(ev->wmi_service_bitmap)); + wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap); + ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); + ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", + ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); if (strlen(ar->hw->wiphy->fw_version) == 0) { snprintf(ar->hw->wiphy->fw_version, @@ -1960,11 +2215,11 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar, /* FIXME: it probably should be better to support this */ if (__le32_to_cpu(ev->num_mem_reqs) > 0) { - ath10k_warn("target requested %d memory chunks; ignoring\n", + ath10k_warn(ar, "target requested %d memory chunks; ignoring\n", __le32_to_cpu(ev->num_mem_reqs)); } - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", __le32_to_cpu(ev->sw_version), __le32_to_cpu(ev->sw_version_1), @@ -1986,9 +2241,10 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i; int ret; struct wmi_service_ready_event_10x *ev = (void *)skb->data; + DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {}; if (skb->len < sizeof(*ev)) { - ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n", + ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
skb->len, sizeof(*ev)); return; } @@ -2004,7 +2260,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains); if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) { - ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n", + ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n", ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM); ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM; } @@ -2012,8 +2268,10 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, ar->ath_common.regulatory.current_rd = __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd); - ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap, - sizeof(ev->wmi_service_bitmap)); + wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap); + ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap)); + ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ", + ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap)); if (strlen(ar->hw->wiphy->fw_version) == 0) { snprintf(ar->hw->wiphy->fw_version, @@ -2026,7 +2284,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs); if (num_mem_reqs > ATH10K_MAX_MEM_REQS) { - ath10k_warn("requested memory chunks number (%d) exceeds the limit\n", + ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n", num_mem_reqs); return; } @@ -2034,7 +2292,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, if (!num_mem_reqs) goto exit; - ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n", num_mem_reqs); for (i = 0; i < num_mem_reqs; ++i) { @@ -2052,7 +2310,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) num_units = TARGET_10X_NUM_VDEVS + 1; - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n", req_id, __le32_to_cpu(ev->mem_reqs[i].num_units), @@ -2067,7 +2325,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar, } exit: - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n", __le32_to_cpu(ev->sw_version), __le32_to_cpu(ev->abi_version), @@ -2091,7 +2349,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb) memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n", __le32_to_cpu(ev->sw_version), __le32_to_cpu(ev->abi_version), @@ -2211,7 +2469,7 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_ready_event_rx(ar, skb); break; default: - ath10k_warn("Unknown eventid: %d\n", id); + ath10k_warn(ar, "Unknown eventid: %d\n", id); break; } @@ -2318,27 +2576,151 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_ready_event_rx(ar, skb); break; default: - ath10k_warn("Unknown eventid: %d\n", id); + ath10k_warn(ar, "Unknown eventid: %d\n", id); break; } dev_kfree_skb(skb); } +static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct 
sk_buff *skb) +{ + struct wmi_cmd_hdr *cmd_hdr; + enum wmi_10_2_event_id id; + + cmd_hdr = (struct wmi_cmd_hdr *)skb->data; + id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID); + + if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL) + return; + + trace_ath10k_wmi_event(id, skb->data, skb->len); + + switch (id) { + case WMI_10_2_MGMT_RX_EVENTID: + ath10k_wmi_event_mgmt_rx(ar, skb); + /* mgmt_rx() owns the skb now! */ + return; + case WMI_10_2_SCAN_EVENTID: + ath10k_wmi_event_scan(ar, skb); + break; + case WMI_10_2_CHAN_INFO_EVENTID: + ath10k_wmi_event_chan_info(ar, skb); + break; + case WMI_10_2_ECHO_EVENTID: + ath10k_wmi_event_echo(ar, skb); + break; + case WMI_10_2_DEBUG_MESG_EVENTID: + ath10k_wmi_event_debug_mesg(ar, skb); + break; + case WMI_10_2_UPDATE_STATS_EVENTID: + ath10k_wmi_event_update_stats(ar, skb); + break; + case WMI_10_2_VDEV_START_RESP_EVENTID: + ath10k_wmi_event_vdev_start_resp(ar, skb); + break; + case WMI_10_2_VDEV_STOPPED_EVENTID: + ath10k_wmi_event_vdev_stopped(ar, skb); + break; + case WMI_10_2_PEER_STA_KICKOUT_EVENTID: + ath10k_wmi_event_peer_sta_kickout(ar, skb); + break; + case WMI_10_2_HOST_SWBA_EVENTID: + ath10k_wmi_event_host_swba(ar, skb); + break; + case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID: + ath10k_wmi_event_tbttoffset_update(ar, skb); + break; + case WMI_10_2_PHYERR_EVENTID: + ath10k_wmi_event_phyerr(ar, skb); + break; + case WMI_10_2_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + break; + case WMI_10_2_PROFILE_MATCH: + ath10k_wmi_event_profile_match(ar, skb); + break; + case WMI_10_2_DEBUG_PRINT_EVENTID: + ath10k_wmi_event_debug_print(ar, skb); + break; + case WMI_10_2_PDEV_QVIT_EVENTID: + ath10k_wmi_event_pdev_qvit(ar, skb); + break; + case WMI_10_2_WLAN_PROFILE_DATA_EVENTID: + ath10k_wmi_event_wlan_profile_data(ar, skb); + break; + case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_rtt_measurement_report(ar, skb); + break; + case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID: + ath10k_wmi_event_tsf_measurement_report(ar, skb); + break; + case WMI_10_2_RTT_ERROR_REPORT_EVENTID: + ath10k_wmi_event_rtt_error_report(ar, skb); + break; + case WMI_10_2_WOW_WAKEUP_HOST_EVENTID: + ath10k_wmi_event_wow_wakeup_host(ar, skb); + break; + case WMI_10_2_DCS_INTERFERENCE_EVENTID: + ath10k_wmi_event_dcs_interference(ar, skb); + break; + case WMI_10_2_PDEV_TPC_CONFIG_EVENTID: + ath10k_wmi_event_pdev_tpc_config(ar, skb); + break; + case WMI_10_2_INST_RSSI_STATS_EVENTID: + ath10k_wmi_event_inst_rssi_stats(ar, skb); + break; + case WMI_10_2_VDEV_STANDBY_REQ_EVENTID: + ath10k_wmi_event_vdev_standby_req(ar, skb); + break; + case WMI_10_2_VDEV_RESUME_REQ_EVENTID: + ath10k_wmi_event_vdev_resume_req(ar, skb); + break; + case WMI_10_2_SERVICE_READY_EVENTID: + ath10k_wmi_10x_service_ready_event_rx(ar, skb); + break; + case WMI_10_2_READY_EVENTID: + ath10k_wmi_ready_event_rx(ar, skb); + break; + case WMI_10_2_RTT_KEEPALIVE_EVENTID: + case WMI_10_2_GPIO_INPUT_EVENTID: + case WMI_10_2_PEER_RATECODE_LIST_EVENTID: + case WMI_10_2_GENERIC_BUFFER_EVENTID: + case WMI_10_2_MCAST_BUF_RELEASE_EVENTID: + case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID: + case WMI_10_2_WDS_PEER_EVENTID: + ath10k_dbg(ar, ATH10K_DBG_WMI, + "received event id %d not implemented\n", id); + break; + default: + ath10k_warn(ar, "Unknown eventid: %d\n", id); + break; + } + + dev_kfree_skb(skb); +} static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb) { - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - ath10k_wmi_10x_process_rx(ar, skb); - else + if 
(test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ath10k_wmi_10_2_process_rx(ar, skb); + else + ath10k_wmi_10x_process_rx(ar, skb); + } else { ath10k_wmi_main_process_rx(ar, skb); + } } /* WMI Initialization functions */ int ath10k_wmi_attach(struct ath10k *ar) { if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { - ar->wmi.cmd = &wmi_10x_cmd_map; + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ar->wmi.cmd = &wmi_10_2_cmd_map; + else + ar->wmi.cmd = &wmi_10x_cmd_map; + ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ar->wmi.pdev_param = &wmi_10x_pdev_param_map; } else { @@ -2388,7 +2770,7 @@ int ath10k_wmi_connect(struct ath10k *ar) status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp); if (status) { - ath10k_warn("failed to connect to WMI CONTROL service status: %d\n", + ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n", status); return status; } @@ -2404,7 +2786,7 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd, struct wmi_pdev_set_regdomain_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2415,7 +2797,7 @@ static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd, cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g); cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n", rd, rd2g, rd5g, ctl2g, ctl5g); @@ -2431,7 +2813,7 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd, struct wmi_pdev_set_regdomain_cmd_10x *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2443,7 +2825,7 @@ static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd, cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g); cmd->dfs_domain = __cpu_to_le32(dfs_reg); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n", rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg); @@ -2473,7 +2855,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar, if (arg->passive) return -EINVAL; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2491,7 +2873,7 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar, cmd->chan.reg_classid = arg->reg_class_id; cmd->chan.antenna_max = arg->max_antenna_gain; - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi set channel mode %d freq %d\n", arg->mode, arg->freq); @@ -2504,7 +2886,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) struct wmi_pdev_suspend_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2518,7 +2900,7 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar) { struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(0); + skb = ath10k_wmi_alloc_skb(ar, 0); if (skb == NULL) return -ENOMEM; @@ -2531,11 +2913,12 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) struct sk_buff *skb; if (id == WMI_PDEV_PARAM_UNSUPPORTED) { - ath10k_warn("pdev param %d not supported by firmware\n", id); + ath10k_warn(ar, "pdev param %d not supported by firmware\n", + id); return 
-EOPNOTSUPP; } - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2543,7 +2926,7 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value) cmd->param_id = __cpu_to_le32(id); cmd->param_value = __cpu_to_le32(value); - ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n", id, value); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid); } @@ -2610,7 +2993,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) len = sizeof(*cmd) + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); - buf = ath10k_wmi_alloc_skb(len); + buf = ath10k_wmi_alloc_skb(ar, len); if (!buf) return -ENOMEM; @@ -2621,7 +3004,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) goto out; } - ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", ar->wmi.num_mem_chunks); cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); @@ -2634,7 +3017,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) cmd->host_mem_chunks[i].req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi chunk %d len %d requested, addr 0x%llx\n", i, ar->wmi.mem_chunks[i].len, @@ -2643,7 +3026,7 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar) out: memcpy(&cmd->resource_config, &config, sizeof(config)); - ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); } @@ -2701,7 +3084,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) len = sizeof(*cmd) + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); - buf = ath10k_wmi_alloc_skb(len); + buf = ath10k_wmi_alloc_skb(ar, len); if (!buf) return -ENOMEM; @@ -2712,7 +3095,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) goto out; } - ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", ar->wmi.num_mem_chunks); cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); @@ -2725,7 +3108,7 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) cmd->host_mem_chunks[i].req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi chunk %d len %d requested, addr 0x%llx\n", i, ar->wmi.mem_chunks[i].len, @@ -2734,7 +3117,98 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar) out: memcpy(&cmd->resource_config, &config, sizeof(config)); - ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n"); + return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); +} + +static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar) +{ + struct wmi_init_cmd_10_2 *cmd; + struct sk_buff *buf; + struct wmi_resource_config_10x config = {}; + u32 len, val; + int i; + + config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); + config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); + config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); + config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); + config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); + config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); + config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); + config.rx_timeout_pri_vo = 
__cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI); + config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI); + config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE); + + config.scan_max_pending_reqs = + __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS); + + config.bmiss_offload_max_vdev = + __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_vdev = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV); + + config.roam_offload_max_ap_profiles = + __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES); + + config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS); + config.num_mcast_table_elems = + __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS); + + config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE); + config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE); + config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES); + config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE); + config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM); + + val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK; + config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val); + + config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG); + + config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC); + config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES); + + len = sizeof(*cmd) + + (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks); + + buf = ath10k_wmi_alloc_skb(ar, len); + if (!buf) + return -ENOMEM; + + cmd = (struct wmi_init_cmd_10_2 *)buf->data; + + if (ar->wmi.num_mem_chunks == 0) { + cmd->num_host_mem_chunks = 0; + goto out; + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n", + ar->wmi.num_mem_chunks); + + cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks); + + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + cmd->host_mem_chunks[i].ptr = + __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + cmd->host_mem_chunks[i].size = + __cpu_to_le32(ar->wmi.mem_chunks[i].len); + cmd->host_mem_chunks[i].req_id = + __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi chunk %d len %d requested, addr 0x%llx\n", + i, + ar->wmi.mem_chunks[i].len, + (unsigned long long)ar->wmi.mem_chunks[i].paddr); + } +out: + memcpy(&cmd->resource_config.common, &config, sizeof(config)); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n"); return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid); } @@ -2742,10 +3216,14 @@ int ath10k_wmi_cmd_init(struct ath10k *ar) { int ret; - if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) - ret = ath10k_wmi_10x_cmd_init(ar); - else + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ret = ath10k_wmi_10_2_cmd_init(ar); + else + ret = ath10k_wmi_10x_cmd_init(ar); + } else { ret = ath10k_wmi_main_cmd_init(ar); + } return ret; } @@ -2822,7 +3300,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar, if (len < 0) return len; /* len contains error code here */ - skb = ath10k_wmi_alloc_skb(len); + skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) return -ENOMEM; @@ -2865,8 +3343,8 @@ int ath10k_wmi_start_scan(struct ath10k *ar, channels->num_chan = __cpu_to_le32(arg->n_channels); for (i = 0; i < arg->n_channels; i++) - channels->channel_list[i] = - 
__cpu_to_le32(arg->channels[i]); + channels->channel_list[i].freq = + __cpu_to_le16(arg->channels[i]); off += sizeof(*channels); off += sizeof(__le32) * arg->n_channels; @@ -2918,7 +3396,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar, return -EINVAL; } - ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n"); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid); } @@ -2960,7 +3438,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF) return -EINVAL; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -2976,7 +3454,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg) cmd->scan_id = __cpu_to_le32(scan_id); cmd->scan_req_id = __cpu_to_le32(req_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n", arg->req_id, arg->req_type, arg->u.scan_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid); @@ -2990,7 +3468,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, struct wmi_vdev_create_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3000,7 +3478,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id, cmd->vdev_subtype = __cpu_to_le32(subtype); memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI vdev create: id %d type %d subtype %d macaddr %pM\n", vdev_id, type, subtype, macaddr); @@ -3012,14 +3490,14 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id) struct wmi_vdev_delete_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_delete_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid); @@ -3052,7 +3530,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, else return -EINVAL; /* should not happen, we already check cmd_id */ - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3090,7 +3568,7 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar, cmd->chan.reg_classid = arg->channel.reg_class_id; cmd->chan.antenna_max = arg->channel.max_antenna_gain; - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, " "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id, flags, arg->channel.freq, arg->channel.mode, @@ -3120,14 +3598,14 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id) struct wmi_vdev_stop_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_stop_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid); } @@ -3137,7 +3615,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 
*bssid) struct wmi_vdev_up_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3146,7 +3624,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid) cmd->vdev_assoc_id = __cpu_to_le32(aid); memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n", vdev_id, aid, bssid); @@ -3158,14 +3636,14 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id) struct wmi_vdev_down_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_vdev_down_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt vdev down id 0x%x\n", vdev_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid); @@ -3178,13 +3656,13 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, struct sk_buff *skb; if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) { - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev param %d not supported by firmware\n", param_id); return -EOPNOTSUPP; } - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3193,7 +3671,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(param_value); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev id 0x%x set param %d value %d\n", vdev_id, param_id, param_value); @@ -3211,7 +3689,7 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar, if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) return -EINVAL; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len); if (!skb) return -ENOMEM; @@ -3229,20 +3707,76 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar, if (arg->key_data) memcpy(cmd->key_data, arg->key_data, arg->key_len); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev install key idx %d cipher %d len %d\n", arg->key_idx, arg->key_cipher, arg->key_len); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_install_key_cmdid); } +int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, + const struct wmi_vdev_spectral_conf_arg *arg) +{ + struct wmi_vdev_spectral_conf_cmd *cmd; + struct sk_buff *skb; + u32 cmdid; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + cmd->scan_count = __cpu_to_le32(arg->scan_count); + cmd->scan_period = __cpu_to_le32(arg->scan_period); + cmd->scan_priority = __cpu_to_le32(arg->scan_priority); + cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size); + cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena); + cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena); + cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref); + cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay); + cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr); + cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr); + cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode); + cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode); + cmd->scan_rssi_thr = 
__cpu_to_le32(arg->scan_rssi_thr); + cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format); + cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode); + cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale); + cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj); + cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask); + + cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmdid); +} + +int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, + u32 enable) +{ + struct wmi_vdev_spectral_enable_cmd *cmd; + struct sk_buff *skb; + u32 cmdid; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->trigger_cmd = __cpu_to_le32(trigger); + cmd->enable_cmd = __cpu_to_le32(enable); + + cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid; + return ath10k_wmi_cmd_send(ar, skb, cmdid); +} + int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, const u8 peer_addr[ETH_ALEN]) { struct wmi_peer_create_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3250,7 +3784,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, cmd->vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer create vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid); @@ -3262,7 +3796,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, struct wmi_peer_delete_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3270,7 +3804,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, cmd->vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer delete vdev_id %d peer_addr %pM\n", vdev_id, peer_addr); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid); @@ -3282,7 +3816,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, struct wmi_peer_flush_tids_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3291,7 +3825,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id, cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n", vdev_id, peer_addr, tid_bitmap); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid); @@ -3304,7 +3838,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, struct wmi_peer_set_param_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3314,7 +3848,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, cmd->param_value = __cpu_to_le32(param_value); memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev %d peer 0x%pM set param %d value %d\n", vdev_id, peer_addr, param_id, param_value); @@ -3327,7 +3861,7 @@ int 
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, struct wmi_sta_powersave_mode_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3335,7 +3869,7 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id, cmd->vdev_id = __cpu_to_le32(vdev_id); cmd->sta_ps_mode = __cpu_to_le32(psmode); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi set powersave id 0x%x mode %d\n", vdev_id, psmode); @@ -3350,7 +3884,7 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, struct wmi_sta_powersave_param_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3359,7 +3893,7 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id, cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(value); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sta ps param vdev_id 0x%x param %d value %d\n", vdev_id, param_id, value); return ath10k_wmi_cmd_send(ar, skb, @@ -3375,7 +3909,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, if (!mac) return -EINVAL; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3385,7 +3919,7 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac, cmd->param_value = __cpu_to_le32(value); memcpy(&cmd->peer_macaddr, mac, ETH_ALEN); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n", vdev_id, param_id, value, mac); @@ -3405,7 +3939,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar, len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel); - skb = ath10k_wmi_alloc_skb(len); + skb = ath10k_wmi_alloc_skb(ar, len); if (!skb) return -EINVAL; @@ -3447,24 +3981,12 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar, return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid); } -int ath10k_wmi_peer_assoc(struct ath10k *ar, - const struct wmi_peer_assoc_complete_arg *arg) +static void +ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) { - struct wmi_peer_assoc_complete_cmd *cmd; - struct sk_buff *skb; + struct wmi_common_peer_assoc_complete_cmd *cmd = buf; - if (arg->peer_mpdu_density > 16) - return -EINVAL; - if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) - return -EINVAL; - if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) - return -EINVAL; - - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); - if (!skb) - return -ENOMEM; - - cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(arg->vdev_id); cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 
0 : 1); cmd->peer_associd = __cpu_to_le32(arg->peer_aid); @@ -3499,8 +4021,80 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar, __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); cmd->peer_vht_rates.tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); +} + +static void +ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_main_peer_assoc_complete_cmd *cmd = buf; + + ath10k_wmi_peer_assoc_fill(ar, buf, arg); + memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info)); +} + +static void +ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + ath10k_wmi_peer_assoc_fill(ar, buf, arg); +} + +static void +ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf; + int max_mcs, max_nss; + u32 info0; + + /* TODO: Is using max values okay with firmware? */ + max_mcs = 0xf; + max_nss = 0xf; + + info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) | + SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS); + + ath10k_wmi_peer_assoc_fill(ar, buf, arg); + cmd->info0 = __cpu_to_le32(info0); +} + +int ath10k_wmi_peer_assoc(struct ath10k *ar, + const struct wmi_peer_assoc_complete_arg *arg) +{ + struct sk_buff *skb; + int len; + + if (arg->peer_mpdu_density > 16) + return -EINVAL; + if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) + return -EINVAL; + if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) + return -EINVAL; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd); + else + len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd); + } else { + len = sizeof(struct wmi_main_peer_assoc_complete_cmd); + } + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) { + if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features)) + ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg); + else + ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg); + } else { + ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg); + } - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer assoc vdev %d addr %pM (%s)\n", arg->vdev_id, arg->addr, arg->peer_reassoc ? 
"reassociate" : "new"); @@ -3518,7 +4112,7 @@ int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) int ret; u16 fc; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3532,6 +4126,7 @@ int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) cmd->msdu_id = 0; cmd->frame_control = __cpu_to_le32(fc); cmd->flags = 0; + cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA); if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero) cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); @@ -3565,7 +4160,7 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, struct wmi_pdev_set_wmm_params *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3575,7 +4170,7 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi); ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo); - ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n"); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_wmm_params_cmdid); } @@ -3585,14 +4180,14 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id) struct wmi_request_stats_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; cmd = (struct wmi_request_stats_cmd *)skb->data; cmd->stats_id = __cpu_to_le32(stats_id); - ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid); } @@ -3602,7 +4197,7 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar, struct wmi_force_fw_hang_cmd *cmd; struct sk_buff *skb; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3610,7 +4205,7 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar, cmd->type = __cpu_to_le32(type); cmd->delay_ms = __cpu_to_le32(delay_ms); - ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n", type, delay_ms); return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); } @@ -3621,7 +4216,7 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) struct sk_buff *skb; u32 cfg; - skb = ath10k_wmi_alloc_skb(sizeof(*cmd)); + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return -ENOMEM; @@ -3642,7 +4237,7 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) cmd->config_enable = __cpu_to_le32(cfg); cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK); - ath10k_dbg(ATH10K_DBG_WMI, + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi dbglog cfg modules %08x %08x config %08x %08x\n", __le32_to_cpu(cmd->module_enable), __le32_to_cpu(cmd->module_valid), diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index e93df2c10413..e70836586756 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -73,116 +73,279 @@ struct wmi_cmd_hdr { #define HTC_PROTOCOL_VERSION 0x0002 #define WMI_PROTOCOL_VERSION 0x0002 -enum wmi_service_id { - WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */ - WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */ - WMI_SERVICE_ROAM_OFFLOAD, /* roam offload */ - 
WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */ - WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */ - WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */ - WMI_SERVICE_AP_UAPSD, /* uapsd on AP */ - WMI_SERVICE_AP_DFS, /* DFS on AP */ - WMI_SERVICE_11AC, /* supports 11ac */ - WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/ - WMI_SERVICE_PHYERR, /* PHY error */ - WMI_SERVICE_BCN_FILTER, /* Beacon filter support */ - WMI_SERVICE_RTT, /* RTT (round trip time) support */ - WMI_SERVICE_RATECTRL, /* Rate-control */ - WMI_SERVICE_WOW, /* WOW Support */ - WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */ - WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */ - WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support */ - WMI_SERVICE_NLO, /* Network list offload service */ - WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */ - WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */ - WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */ - WMI_SERVICE_CHATTER, /* Chatter service */ - WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */ - WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */ - WMI_SERVICE_FORCE_FW_HANG, /* To test fw recovery mechanism */ - WMI_SERVICE_GPIO, /* GPIO service */ - WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */ - WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* UAPSD AC Trigger Generation */ - WMI_STA_UAPSD_VAR_AUTO_TRIG, /* -do- */ - WMI_SERVICE_STA_KEEP_ALIVE, /* STA keep alive mechanism support */ - WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */ - - WMI_SERVICE_LAST, - WMI_MAX_SERVICE = 64 /* max service */ +enum wmi_service { + WMI_SERVICE_BEACON_OFFLOAD = 0, + WMI_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_DFS, + WMI_SERVICE_11AC, + WMI_SERVICE_BLOCKACK, + WMI_SERVICE_PHYERR, + WMI_SERVICE_BCN_FILTER, + WMI_SERVICE_RTT, + WMI_SERVICE_RATECTRL, + WMI_SERVICE_WOW, + WMI_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_IRAM_TIDS, + WMI_SERVICE_ARPNS_OFFLOAD, + WMI_SERVICE_NLO, + WMI_SERVICE_GTK_OFFLOAD, + WMI_SERVICE_SCAN_SCH, + WMI_SERVICE_CSA_OFFLOAD, + WMI_SERVICE_CHATTER, + WMI_SERVICE_COEX_FREQAVOID, + WMI_SERVICE_PACKET_POWER_SAVE, + WMI_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_GPIO, + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_SERVICE_STA_KEEP_ALIVE, + WMI_SERVICE_TX_ENCAP, + WMI_SERVICE_BURST, + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, +}; + +enum wmi_10x_service { + WMI_10X_SERVICE_BEACON_OFFLOAD = 0, + WMI_10X_SERVICE_SCAN_OFFLOAD, + WMI_10X_SERVICE_ROAM_OFFLOAD, + WMI_10X_SERVICE_BCN_MISS_OFFLOAD, + WMI_10X_SERVICE_STA_PWRSAVE, + WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_10X_SERVICE_AP_UAPSD, + WMI_10X_SERVICE_AP_DFS, + WMI_10X_SERVICE_11AC, + WMI_10X_SERVICE_BLOCKACK, + WMI_10X_SERVICE_PHYERR, + WMI_10X_SERVICE_BCN_FILTER, + WMI_10X_SERVICE_RTT, + WMI_10X_SERVICE_RATECTRL, + WMI_10X_SERVICE_WOW, + WMI_10X_SERVICE_RATECTRL_CACHE, + WMI_10X_SERVICE_IRAM_TIDS, + WMI_10X_SERVICE_BURST, + + /* introduced in 10.2 */ + WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_10X_SERVICE_FORCE_FW_HANG, + WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, +}; + +enum wmi_main_service { + WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0, + WMI_MAIN_SERVICE_SCAN_OFFLOAD, + WMI_MAIN_SERVICE_ROAM_OFFLOAD, + WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD, + WMI_MAIN_SERVICE_STA_PWRSAVE, + 
WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_MAIN_SERVICE_AP_UAPSD, + WMI_MAIN_SERVICE_AP_DFS, + WMI_MAIN_SERVICE_11AC, + WMI_MAIN_SERVICE_BLOCKACK, + WMI_MAIN_SERVICE_PHYERR, + WMI_MAIN_SERVICE_BCN_FILTER, + WMI_MAIN_SERVICE_RTT, + WMI_MAIN_SERVICE_RATECTRL, + WMI_MAIN_SERVICE_WOW, + WMI_MAIN_SERVICE_RATECTRL_CACHE, + WMI_MAIN_SERVICE_IRAM_TIDS, + WMI_MAIN_SERVICE_ARPNS_OFFLOAD, + WMI_MAIN_SERVICE_NLO, + WMI_MAIN_SERVICE_GTK_OFFLOAD, + WMI_MAIN_SERVICE_SCAN_SCH, + WMI_MAIN_SERVICE_CSA_OFFLOAD, + WMI_MAIN_SERVICE_CHATTER, + WMI_MAIN_SERVICE_COEX_FREQAVOID, + WMI_MAIN_SERVICE_PACKET_POWER_SAVE, + WMI_MAIN_SERVICE_FORCE_FW_HANG, + WMI_MAIN_SERVICE_GPIO, + WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_MAIN_SERVICE_STA_KEEP_ALIVE, + WMI_MAIN_SERVICE_TX_ENCAP, }; static inline char *wmi_service_name(int service_id) { +#define SVCSTR(x) case x: return #x + switch (service_id) { - case WMI_SERVICE_BEACON_OFFLOAD: - return "BEACON_OFFLOAD"; - case WMI_SERVICE_SCAN_OFFLOAD: - return "SCAN_OFFLOAD"; - case WMI_SERVICE_ROAM_OFFLOAD: - return "ROAM_OFFLOAD"; - case WMI_SERVICE_BCN_MISS_OFFLOAD: - return "BCN_MISS_OFFLOAD"; - case WMI_SERVICE_STA_PWRSAVE: - return "STA_PWRSAVE"; - case WMI_SERVICE_STA_ADVANCED_PWRSAVE: - return "STA_ADVANCED_PWRSAVE"; - case WMI_SERVICE_AP_UAPSD: - return "AP_UAPSD"; - case WMI_SERVICE_AP_DFS: - return "AP_DFS"; - case WMI_SERVICE_11AC: - return "11AC"; - case WMI_SERVICE_BLOCKACK: - return "BLOCKACK"; - case WMI_SERVICE_PHYERR: - return "PHYERR"; - case WMI_SERVICE_BCN_FILTER: - return "BCN_FILTER"; - case WMI_SERVICE_RTT: - return "RTT"; - case WMI_SERVICE_RATECTRL: - return "RATECTRL"; - case WMI_SERVICE_WOW: - return "WOW"; - case WMI_SERVICE_RATECTRL_CACHE: - return "RATECTRL CACHE"; - case WMI_SERVICE_IRAM_TIDS: - return "IRAM TIDS"; - case WMI_SERVICE_ARPNS_OFFLOAD: - return "ARPNS_OFFLOAD"; - case WMI_SERVICE_NLO: - return "NLO"; - case WMI_SERVICE_GTK_OFFLOAD: - return "GTK_OFFLOAD"; - case WMI_SERVICE_SCAN_SCH: - return "SCAN_SCH"; - case WMI_SERVICE_CSA_OFFLOAD: - return "CSA_OFFLOAD"; - case WMI_SERVICE_CHATTER: - return "CHATTER"; - case WMI_SERVICE_COEX_FREQAVOID: - return "COEX_FREQAVOID"; - case WMI_SERVICE_PACKET_POWER_SAVE: - return "PACKET_POWER_SAVE"; - case WMI_SERVICE_FORCE_FW_HANG: - return "FORCE FW HANG"; - case WMI_SERVICE_GPIO: - return "GPIO"; - case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM: - return "MODULATED DTIM"; - case WMI_STA_UAPSD_BASIC_AUTO_TRIG: - return "BASIC UAPSD"; - case WMI_STA_UAPSD_VAR_AUTO_TRIG: - return "VAR UAPSD"; - case WMI_SERVICE_STA_KEEP_ALIVE: - return "STA KEEP ALIVE"; - case WMI_SERVICE_TX_ENCAP: - return "TX ENCAP"; + SVCSTR(WMI_SERVICE_BEACON_OFFLOAD); + SVCSTR(WMI_SERVICE_SCAN_OFFLOAD); + SVCSTR(WMI_SERVICE_ROAM_OFFLOAD); + SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD); + SVCSTR(WMI_SERVICE_STA_PWRSAVE); + SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE); + SVCSTR(WMI_SERVICE_AP_UAPSD); + SVCSTR(WMI_SERVICE_AP_DFS); + SVCSTR(WMI_SERVICE_11AC); + SVCSTR(WMI_SERVICE_BLOCKACK); + SVCSTR(WMI_SERVICE_PHYERR); + SVCSTR(WMI_SERVICE_BCN_FILTER); + SVCSTR(WMI_SERVICE_RTT); + SVCSTR(WMI_SERVICE_RATECTRL); + SVCSTR(WMI_SERVICE_WOW); + SVCSTR(WMI_SERVICE_RATECTRL_CACHE); + SVCSTR(WMI_SERVICE_IRAM_TIDS); + SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD); + SVCSTR(WMI_SERVICE_NLO); + SVCSTR(WMI_SERVICE_GTK_OFFLOAD); + SVCSTR(WMI_SERVICE_SCAN_SCH); + SVCSTR(WMI_SERVICE_CSA_OFFLOAD); + SVCSTR(WMI_SERVICE_CHATTER); + SVCSTR(WMI_SERVICE_COEX_FREQAVOID); + 
SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE); + SVCSTR(WMI_SERVICE_FORCE_FW_HANG); + SVCSTR(WMI_SERVICE_GPIO); + SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM); + SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG); + SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG); + SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE); + SVCSTR(WMI_SERVICE_TX_ENCAP); + SVCSTR(WMI_SERVICE_BURST); + SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT); + SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT); default: - return "UNKNOWN SERVICE\n"; + return NULL; } + +#undef SVCSTR +} + +#define WMI_MAX_SERVICE 64 + +#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \ + (__le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ + BIT((svc_id)%(sizeof(u32)))) + +#define SVCMAP(x, y) \ + do { \ + if (WMI_SERVICE_IS_ENABLED((in), (x))) \ + __set_bit(y, out); \ + } while (0) + +static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out) +{ + SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD, + WMI_SERVICE_BEACON_OFFLOAD); + SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_SCAN_OFFLOAD); + SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD); + SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD); + SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_PWRSAVE); + SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE); + SVCMAP(WMI_10X_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_UAPSD); + SVCMAP(WMI_10X_SERVICE_AP_DFS, + WMI_SERVICE_AP_DFS); + SVCMAP(WMI_10X_SERVICE_11AC, + WMI_SERVICE_11AC); + SVCMAP(WMI_10X_SERVICE_BLOCKACK, + WMI_SERVICE_BLOCKACK); + SVCMAP(WMI_10X_SERVICE_PHYERR, + WMI_SERVICE_PHYERR); + SVCMAP(WMI_10X_SERVICE_BCN_FILTER, + WMI_SERVICE_BCN_FILTER); + SVCMAP(WMI_10X_SERVICE_RTT, + WMI_SERVICE_RTT); + SVCMAP(WMI_10X_SERVICE_RATECTRL, + WMI_SERVICE_RATECTRL); + SVCMAP(WMI_10X_SERVICE_WOW, + WMI_SERVICE_WOW); + SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_RATECTRL_CACHE); + SVCMAP(WMI_10X_SERVICE_IRAM_TIDS, + WMI_SERVICE_IRAM_TIDS); + SVCMAP(WMI_10X_SERVICE_BURST, + WMI_SERVICE_BURST); + SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT); + SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_FORCE_FW_HANG); + SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT, + WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT); +} + +static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out) +{ + SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD, + WMI_SERVICE_BEACON_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD, + WMI_SERVICE_SCAN_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD, + WMI_SERVICE_ROAM_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD, + WMI_SERVICE_BCN_MISS_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE, + WMI_SERVICE_STA_PWRSAVE); + SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE, + WMI_SERVICE_STA_ADVANCED_PWRSAVE); + SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD, + WMI_SERVICE_AP_UAPSD); + SVCMAP(WMI_MAIN_SERVICE_AP_DFS, + WMI_SERVICE_AP_DFS); + SVCMAP(WMI_MAIN_SERVICE_11AC, + WMI_SERVICE_11AC); + SVCMAP(WMI_MAIN_SERVICE_BLOCKACK, + WMI_SERVICE_BLOCKACK); + SVCMAP(WMI_MAIN_SERVICE_PHYERR, + WMI_SERVICE_PHYERR); + SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER, + WMI_SERVICE_BCN_FILTER); + SVCMAP(WMI_MAIN_SERVICE_RTT, + WMI_SERVICE_RTT); + SVCMAP(WMI_MAIN_SERVICE_RATECTRL, + WMI_SERVICE_RATECTRL); + SVCMAP(WMI_MAIN_SERVICE_WOW, + WMI_SERVICE_WOW); + SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE, + WMI_SERVICE_RATECTRL_CACHE); + SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS, + WMI_SERVICE_IRAM_TIDS); + SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD, + WMI_SERVICE_ARPNS_OFFLOAD); + 
SVCMAP(WMI_MAIN_SERVICE_NLO, + WMI_SERVICE_NLO); + SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD, + WMI_SERVICE_GTK_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH, + WMI_SERVICE_SCAN_SCH); + SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD, + WMI_SERVICE_CSA_OFFLOAD); + SVCMAP(WMI_MAIN_SERVICE_CHATTER, + WMI_SERVICE_CHATTER); + SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID, + WMI_SERVICE_COEX_FREQAVOID); + SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE, + WMI_SERVICE_PACKET_POWER_SAVE); + SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG, + WMI_SERVICE_FORCE_FW_HANG); + SVCMAP(WMI_MAIN_SERVICE_GPIO, + WMI_SERVICE_GPIO); + SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM, + WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM); + SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG); + SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, + WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG); + SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE, + WMI_SERVICE_STA_KEEP_ALIVE); + SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP, + WMI_SERVICE_TX_ENCAP); } +#undef SVCMAP #define WMI_SERVICE_BM_SIZE \ ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32)) @@ -803,6 +966,159 @@ enum wmi_10x_event_id { WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1, }; +enum wmi_10_2_cmd_id { + WMI_10_2_START_CMDID = 0x9000, + WMI_10_2_END_CMDID = 0x9FFF, + WMI_10_2_INIT_CMDID, + WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID, + WMI_10_2_STOP_SCAN_CMDID, + WMI_10_2_SCAN_CHAN_LIST_CMDID, + WMI_10_2_ECHO_CMDID, + WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, + WMI_10_2_PDEV_SET_CHANNEL_CMDID, + WMI_10_2_PDEV_SET_PARAM_CMDID, + WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID, + WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID, + WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID, + WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID, + WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID, + WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID, + WMI_10_2_PDEV_SET_QUIET_MODE_CMDID, + WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID, + WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID, + WMI_10_2_VDEV_CREATE_CMDID, + WMI_10_2_VDEV_DELETE_CMDID, + WMI_10_2_VDEV_START_REQUEST_CMDID, + WMI_10_2_VDEV_RESTART_REQUEST_CMDID, + WMI_10_2_VDEV_UP_CMDID, + WMI_10_2_VDEV_STOP_CMDID, + WMI_10_2_VDEV_DOWN_CMDID, + WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID, + WMI_10_2_VDEV_RESUME_RESPONSE_CMDID, + WMI_10_2_VDEV_SET_PARAM_CMDID, + WMI_10_2_VDEV_INSTALL_KEY_CMDID, + WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID, + WMI_10_2_PEER_CREATE_CMDID, + WMI_10_2_PEER_DELETE_CMDID, + WMI_10_2_PEER_FLUSH_TIDS_CMDID, + WMI_10_2_PEER_SET_PARAM_CMDID, + WMI_10_2_PEER_ASSOC_CMDID, + WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID, + WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID, + WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID, + WMI_10_2_PEER_MCAST_GROUP_CMDID, + WMI_10_2_BCN_TX_CMDID, + WMI_10_2_BCN_PRB_TMPL_CMDID, + WMI_10_2_BCN_FILTER_RX_CMDID, + WMI_10_2_PRB_REQ_FILTER_RX_CMDID, + WMI_10_2_MGMT_TX_CMDID, + WMI_10_2_ADDBA_CLEAR_RESP_CMDID, + WMI_10_2_ADDBA_SEND_CMDID, + WMI_10_2_ADDBA_STATUS_CMDID, + WMI_10_2_DELBA_SEND_CMDID, + WMI_10_2_ADDBA_SET_RESP_CMDID, + WMI_10_2_SEND_SINGLEAMSDU_CMDID, + WMI_10_2_STA_POWERSAVE_MODE_CMDID, + WMI_10_2_STA_POWERSAVE_PARAM_CMDID, + WMI_10_2_STA_MIMO_PS_MODE_CMDID, + WMI_10_2_DBGLOG_CFG_CMDID, + WMI_10_2_PDEV_DFS_ENABLE_CMDID, + WMI_10_2_PDEV_DFS_DISABLE_CMDID, + WMI_10_2_PDEV_QVIT_CMDID, + WMI_10_2_ROAM_SCAN_MODE, + WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD, + WMI_10_2_ROAM_SCAN_PERIOD, + WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD, + WMI_10_2_ROAM_AP_PROFILE, + WMI_10_2_OFL_SCAN_ADD_AP_PROFILE, + WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE, + WMI_10_2_OFL_SCAN_PERIOD, + WMI_10_2_P2P_DEV_SET_DEVICE_INFO, + WMI_10_2_P2P_DEV_SET_DISCOVERABILITY, + 
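+ /* Numbering note: by C enumeration rules WMI_10_2_INIT_CMDID, declared
+  * immediately after WMI_10_2_END_CMDID (0x9FFF), takes the value 0xA000,
+  * while WMI_10_2_START_SCAN_CMDID explicitly rewinds the counter to
+  * WMI_10_2_START_CMDID (0x9000) so every command from there on numbers
+  * upward inside the 10.2 range.
+  */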
WMI_10_2_P2P_GO_SET_BEACON_IE, + WMI_10_2_P2P_GO_SET_PROBE_RESP_IE, + WMI_10_2_AP_PS_PEER_PARAM_CMDID, + WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID, + WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID, + WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID, + WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID, + WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID, + WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID, + WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID, + WMI_10_2_PDEV_SUSPEND_CMDID, + WMI_10_2_PDEV_RESUME_CMDID, + WMI_10_2_ADD_BCN_FILTER_CMDID, + WMI_10_2_RMV_BCN_FILTER_CMDID, + WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID, + WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID, + WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID, + WMI_10_2_WOW_ENABLE_CMDID, + WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID, + WMI_10_2_RTT_MEASREQ_CMDID, + WMI_10_2_RTT_TSF_CMDID, + WMI_10_2_RTT_KEEPALIVE_CMDID, + WMI_10_2_PDEV_SEND_BCN_CMDID, + WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID, + WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID, + WMI_10_2_REQUEST_STATS_CMDID, + WMI_10_2_GPIO_CONFIG_CMDID, + WMI_10_2_GPIO_OUTPUT_CMDID, + WMI_10_2_VDEV_RATEMASK_CMDID, + WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID, + WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID, + WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID, + WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID, + WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID, + WMI_10_2_FORCE_FW_HANG_CMDID, + WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID, + WMI_10_2_PDEV_SET_CTL_TABLE_CMDID, + WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID, + WMI_10_2_PDEV_RATEPWR_TABLE_CMDID, + WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID, + WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1, +}; + +enum wmi_10_2_event_id { + WMI_10_2_SERVICE_READY_EVENTID = 0x8000, + WMI_10_2_READY_EVENTID, + WMI_10_2_DEBUG_MESG_EVENTID, + WMI_10_2_START_EVENTID = 0x9000, + WMI_10_2_END_EVENTID = 0x9FFF, + WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID, + WMI_10_2_ECHO_EVENTID, + WMI_10_2_UPDATE_STATS_EVENTID, + WMI_10_2_INST_RSSI_STATS_EVENTID, + WMI_10_2_VDEV_START_RESP_EVENTID, + WMI_10_2_VDEV_STANDBY_REQ_EVENTID, + WMI_10_2_VDEV_RESUME_REQ_EVENTID, + WMI_10_2_VDEV_STOPPED_EVENTID, + WMI_10_2_PEER_STA_KICKOUT_EVENTID, + WMI_10_2_HOST_SWBA_EVENTID, + WMI_10_2_TBTTOFFSET_UPDATE_EVENTID, + WMI_10_2_MGMT_RX_EVENTID, + WMI_10_2_CHAN_INFO_EVENTID, + WMI_10_2_PHYERR_EVENTID, + WMI_10_2_ROAM_EVENTID, + WMI_10_2_PROFILE_MATCH, + WMI_10_2_DEBUG_PRINT_EVENTID, + WMI_10_2_PDEV_QVIT_EVENTID, + WMI_10_2_WLAN_PROFILE_DATA_EVENTID, + WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID, + WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID, + WMI_10_2_RTT_ERROR_REPORT_EVENTID, + WMI_10_2_RTT_KEEPALIVE_EVENTID, + WMI_10_2_WOW_WAKEUP_HOST_EVENTID, + WMI_10_2_DCS_INTERFERENCE_EVENTID, + WMI_10_2_PDEV_TPC_CONFIG_EVENTID, + WMI_10_2_GPIO_INPUT_EVENTID, + WMI_10_2_PEER_RATECODE_LIST_EVENTID, + WMI_10_2_GENERIC_BUFFER_EVENTID, + WMI_10_2_MCAST_BUF_RELEASE_EVENTID, + WMI_10_2_MCAST_LIST_AGEOUT_EVENTID, + WMI_10_2_WDS_PEER_EVENTID, + WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1, +}; + enum wmi_phy_mode { MODE_11A = 0, /* 11a Mode */ MODE_11G = 1, /* 11b/g Mode */ @@ -1076,10 +1392,6 @@ struct wlan_host_mem_req { __le32 num_units; } __packed; -#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \ - ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \ - (1 << ((svc_id)%(sizeof(u32))))) != 0) - /* * The following struct holds optional payload for * wmi_service_ready_event,e.g., 11ac pass some of the @@ -1551,6 +1863,16 @@ struct wmi_resource_config_10x { __le32 max_frag_entries; } __packed; +struct wmi_resource_config_10_2 { + struct wmi_resource_config_10x common; + 
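+ /* The 10.2 resource config extends the 10.x one by embedding it as a
+  * leading 'common' member, so the host fills the shared fields through
+  * the same layout and only the fields below are 10.2-specific.
+  */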
__le32 max_peer_ext_stats; + __le32 smart_ant_cap; /* 0-disable, 1-enable */ + __le32 bk_min_free; + __le32 be_min_free; + __le32 vi_min_free; + __le32 vo_min_free; + __le32 rx_batchmode; /* 0-disable, 1-enable */ +} __packed; #define NUM_UNITS_IS_NUM_VDEVS 0x1 #define NUM_UNITS_IS_NUM_PEERS 0x2 @@ -1588,11 +1910,28 @@ struct wmi_init_cmd_10x { struct host_memory_chunk host_mem_chunks[1]; } __packed; +struct wmi_init_cmd_10_2 { + struct wmi_resource_config_10_2 resource_config; + __le32 num_host_mem_chunks; + + /* + * variable number of host memory chunks. + * This should be the last element in the structure + */ + struct host_memory_chunk host_mem_chunks[1]; +} __packed; + +struct wmi_chan_list_entry { + __le16 freq; + u8 phy_mode; /* valid for 10.2 only */ + u8 reserved; +} __packed; + /* TLV for channel list */ struct wmi_chan_list { __le32 tag; /* WMI_CHAN_LIST_TAG */ __le32 num_chan; - __le32 channel_list[0]; + struct wmi_chan_list_entry channel_list[0]; } __packed; struct wmi_bssid_list { @@ -1821,7 +2160,7 @@ struct wmi_start_scan_arg { u32 n_bssids; u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN]; - u32 channels[64]; + u16 channels[64]; struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; }; @@ -2067,6 +2406,7 @@ struct wmi_comb_phyerr_rx_event { #define PHYERR_TLV_SIG 0xBB #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8 +#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT 0xF9 struct phyerr_radar_report { __le32 reg0; /* RADAR_REPORT_REG0_* */ @@ -2515,6 +2855,19 @@ enum wmi_10x_pdev_param { WMI_10X_PDEV_PARAM_BURST_DUR, /* Set Bursting Enable*/ WMI_10X_PDEV_PARAM_BURST_ENABLE, + + /* following are available as of firmware 10.2 */ + WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA, + WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE, + WMI_10X_PDEV_PARAM_IGMPMLD_TID, + WMI_10X_PDEV_PARAM_ANTENNA_GAIN, + WMI_10X_PDEV_PARAM_RX_DECAP_MODE, + WMI_10X_PDEV_PARAM_RX_FILTER, + WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID, + WMI_10X_PDEV_PARAM_PROXY_STA_MODE, + WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE, + WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER, + WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER, }; struct wmi_pdev_set_param_cmd { @@ -3387,6 +3740,14 @@ enum wmi_10x_vdev_param { WMI_10X_VDEV_PARAM_ENABLE_RTSCTS, WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS, + + /* following are available as of firmware 10.2 */ + WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE, + WMI_10X_VDEV_PARAM_CABQ_MAXDUR, + WMI_10X_VDEV_PARAM_MFPTEST_SET, + WMI_10X_VDEV_PARAM_RTS_FIXED_RATE, + WMI_10X_VDEV_PARAM_VHT_SGIMASK, + WMI_10X_VDEV_PARAM_VHT80_RATEMASK, }; /* slot time long */ @@ -3444,6 +3805,98 @@ struct wmi_vdev_simple_event { /* unsupported VDEV combination */ #define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2 +/* TODO: please add more comments if you have in-depth information */ +struct wmi_vdev_spectral_conf_cmd { + __le32 vdev_id; + + /* number of fft samples to send (0 for infinite) */ + __le32 scan_count; + __le32 scan_period; + __le32 scan_priority; + + /* number of bins in the FFT: 2^(fft_size - bin_scale) */ + __le32 scan_fft_size; + __le32 scan_gc_ena; + __le32 scan_restart_ena; + __le32 scan_noise_floor_ref; + __le32 scan_init_delay; + __le32 scan_nb_tone_thr; + __le32 scan_str_bin_thr; + __le32 scan_wb_rpt_mode; + __le32 scan_rssi_rpt_mode; + __le32 scan_rssi_thr; + __le32 scan_pwr_format; + + /* rpt_mode: Format of FFT report to software for spectral scan + * triggered FFTs: + * 0: No FFT report (only spectral scan 
summary report) + * 1: 2-dword summary of metrics for each completed FFT + spectral + * scan summary report + * 2: 2-dword summary of metrics for each completed FFT + + * 1x- oversampled bins(in-band) per FFT + spectral scan summary + * report + * 3: 2-dword summary of metrics for each completed FFT + + * 2x- oversampled bins (all) per FFT + spectral scan summary + */ + __le32 scan_rpt_mode; + __le32 scan_bin_scale; + __le32 scan_dbm_adj; + __le32 scan_chn_mask; +} __packed; + +struct wmi_vdev_spectral_conf_arg { + u32 vdev_id; + u32 scan_count; + u32 scan_period; + u32 scan_priority; + u32 scan_fft_size; + u32 scan_gc_ena; + u32 scan_restart_ena; + u32 scan_noise_floor_ref; + u32 scan_init_delay; + u32 scan_nb_tone_thr; + u32 scan_str_bin_thr; + u32 scan_wb_rpt_mode; + u32 scan_rssi_rpt_mode; + u32 scan_rssi_thr; + u32 scan_pwr_format; + u32 scan_rpt_mode; + u32 scan_bin_scale; + u32 scan_dbm_adj; + u32 scan_chn_mask; +}; + +#define WMI_SPECTRAL_ENABLE_DEFAULT 0 +#define WMI_SPECTRAL_COUNT_DEFAULT 0 +#define WMI_SPECTRAL_PERIOD_DEFAULT 35 +#define WMI_SPECTRAL_PRIORITY_DEFAULT 1 +#define WMI_SPECTRAL_FFT_SIZE_DEFAULT 7 +#define WMI_SPECTRAL_GC_ENA_DEFAULT 1 +#define WMI_SPECTRAL_RESTART_ENA_DEFAULT 0 +#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96 +#define WMI_SPECTRAL_INIT_DELAY_DEFAULT 80 +#define WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12 +#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8 +#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0 +#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0 +#define WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0 +#define WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0 +#define WMI_SPECTRAL_RPT_MODE_DEFAULT 2 +#define WMI_SPECTRAL_BIN_SCALE_DEFAULT 1 +#define WMI_SPECTRAL_DBM_ADJ_DEFAULT 1 +#define WMI_SPECTRAL_CHN_MASK_DEFAULT 1 + +struct wmi_vdev_spectral_enable_cmd { + __le32 vdev_id; + __le32 trigger_cmd; + __le32 enable_cmd; +} __packed; + +#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1 +#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2 +#define WMI_SPECTRAL_ENABLE_CMD_ENABLE 1 +#define WMI_SPECTRAL_ENABLE_CMD_DISABLE 2 + /* Beacon processing related command and event structures */ struct wmi_bcn_tx_hdr { __le32 vdev_id; @@ -3470,6 +3923,11 @@ enum wmi_bcn_tx_ref_flags { WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2, }; +/* TODO: It is unclear why "no antenna" works while any other seemingly valid + * chainmask yields no beacons on the air at all. + */ +#define WMI_BCN_TX_REF_DEF_ANTENNA 0 + struct wmi_bcn_tx_ref_cmd { __le32 vdev_id; __le32 data_len; @@ -3481,6 +3939,8 @@ struct wmi_bcn_tx_ref_cmd { __le32 frame_control; /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */ __le32 flags; + /* introduced in 10.2 */ + __le32 antenna_mask; } __packed; /* Beacon filter */ @@ -4053,7 +4513,7 @@ struct wmi_peer_set_q_empty_callback_cmd { /* Maximum listen interval supported by hw in units of beacon interval */ #define ATH10K_MAX_HW_LISTEN_INTERVAL 5 -struct wmi_peer_assoc_complete_cmd { +struct wmi_common_peer_assoc_complete_cmd { struct wmi_mac_addr peer_macaddr; __le32 vdev_id; __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */ @@ -4071,11 +4531,30 @@ struct wmi_peer_assoc_complete_cmd { __le32 peer_vht_caps; __le32 peer_phymode; struct wmi_vht_rate_set peer_vht_rates; +}; + +struct wmi_main_peer_assoc_complete_cmd { + struct wmi_common_peer_assoc_complete_cmd cmd; + /* HT Operation Element of the peer. Five bytes packed in 2 * INT32 array and filled from lsb to msb. 
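+ * ("lsb to msb" presumably means octet 0 of the element occupies
+ * bits 7:0 of peer_ht_info[0], octet 3 bits 31:24, and octet 4
+ * bits 7:0 of peer_ht_info[1], with the remaining bits zeroed.)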
*/ __le32 peer_ht_info[2]; } __packed; +struct wmi_10_1_peer_assoc_complete_cmd { + struct wmi_common_peer_assoc_complete_cmd cmd; +} __packed; + +#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0 +#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f +#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4 +#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0 + +struct wmi_10_2_peer_assoc_complete_cmd { + struct wmi_common_peer_assoc_complete_cmd cmd; + __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */ +} __packed; + struct wmi_peer_assoc_complete_arg { u8 addr[ETH_ALEN]; u32 vdev_id; @@ -4290,6 +4769,10 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id, u32 param_value); int ath10k_wmi_vdev_install_key(struct ath10k *ar, const struct wmi_vdev_install_key_arg *arg); +int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar, + const struct wmi_vdev_spectral_conf_arg *arg); +int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, + u32 enable); int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, const u8 peer_addr[ETH_ALEN]); int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id, diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index 7106547a14dd..66b6366158b9 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -351,8 +351,7 @@ void ath5k_hw_deinit(struct ath5k_hw *ah) { __set_bit(ATH_STAT_INVALID, ah->status); - if (ah->ah_rf_banks != NULL) - kfree(ah->ah_rf_banks); + kfree(ah->ah_rf_banks); ath5k_eeprom_detach(ah); diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 8ad2550bce7f..59a87247aac4 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1423,7 +1423,7 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb, break; } - if (rxs->rate_idx >= 0 && rs->rs_rate == + if (rs->rs_rate == ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short) rxs->flag |= RX_FLAG_SHORTPRE; diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index b8d031ae63c2..399728618fb9 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -62,6 +62,7 @@ #include <linux/export.h> #include <linux/moduleparam.h> +#include <linux/vmalloc.h> #include <linux/seq_file.h> #include <linux/list.h> @@ -894,6 +895,100 @@ static const struct file_operations fops_queue = { .llseek = default_llseek, }; +/* debugfs: eeprom */ + +struct eeprom_private { + u16 *buf; + int len; +}; + +static int open_file_eeprom(struct inode *inode, struct file *file) +{ + struct eeprom_private *ep; + struct ath5k_hw *ah = inode->i_private; + bool res; + int i, ret; + u32 eesize; + u16 val, *buf; + + /* Get eeprom size */ + + res = ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_UPPER, &val); + if (!res) + return -EACCES; + + if (val == 0) { + eesize = AR5K_EEPROM_INFO_MAX + AR5K_EEPROM_INFO_BASE; + } else { + eesize = (val & AR5K_EEPROM_SIZE_UPPER_MASK) << + AR5K_EEPROM_SIZE_ENDLOC_SHIFT; + ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_LOWER, &val); + eesize = eesize | val; + } + + if (eesize > 4096) + return -EINVAL; + + /* Create buffer and read in eeprom */ + + buf = vmalloc(eesize); + if (!buf) { + ret = -ENOMEM; + goto err; + } + + for (i = 0; i < eesize; ++i) { + AR5K_EEPROM_READ(i, val); + buf[i] = val; + } + + /* Create private struct and assign to file */ + + ep = kmalloc(sizeof(*ep), GFP_KERNEL); + if (!ep) { + ret = -ENOMEM; + goto 
freebuf; + } + + ep->buf = buf; + ep->len = i; + + file->private_data = (void *)ep; + + return 0; + +freebuf: + vfree(buf); +err: + return ret; + +} + +static ssize_t read_file_eeprom(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct eeprom_private *ep = file->private_data; + + return simple_read_from_buffer(user_buf, count, ppos, ep->buf, ep->len); +} + +static int release_file_eeprom(struct inode *inode, struct file *file) +{ + struct eeprom_private *ep = file->private_data; + + vfree(ep->buf); + kfree(ep); + + return 0; +} + +static const struct file_operations fops_eeprom = { + .open = open_file_eeprom, + .read = read_file_eeprom, + .release = release_file_eeprom, + .owner = THIS_MODULE, +}; + void ath5k_debug_init_device(struct ath5k_hw *ah) @@ -921,6 +1016,8 @@ ath5k_debug_init_device(struct ath5k_hw *ah) debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc); + debugfs_create_file("eeprom", S_IRUSR, phydir, ah, &fops_eeprom); + debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah, &fops_frameerrors); diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c index 48a6a69b57bc..2062d1190556 100644 --- a/drivers/net/wireless/ath/ath5k/led.c +++ b/drivers/net/wireless/ath/ath5k/led.c @@ -130,6 +130,7 @@ ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led, led->ah = ah; strncpy(led->name, name, sizeof(led->name)); + led->name[sizeof(led->name)-1] = 0; led->led_dev.name = led->name; led->led_dev.default_trigger = trigger; led->led_dev.brightness_set = ath5k_led_brightness_set; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index e535807c3d89..ba60e37213eb 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -717,6 +717,7 @@ ath6kl_add_bss_if_needed(struct ath6kl_vif *vif, memcpy(ie + 2, vif->ssid, vif->ssid_len); memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len); bss = cfg80211_inform_bss(ar->wiphy, chan, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, 0, cap_val, 100, ie, 2 + vif->ssid_len + beacon_ie_len, 0, GFP_KERNEL); diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index fffd52355123..6e473fa4b13c 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1049,7 +1049,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) ar->hw.reserved_ram_size = le32_to_cpup(val); ath6kl_dbg(ATH6KL_DBG_BOOT, - "found reserved ram size ie 0x%d\n", + "found reserved ram size ie %d\n", ar->hw.reserved_ram_size); break; case ATH6KL_FW_IE_CAPABILITIES: diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index 21516bc65785..933aef025698 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -225,7 +225,7 @@ int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value) ret = ath6kl_hif_diag_write32(ar, address, value); if (ret) { - ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n", + ath6kl_err("failed to write 0x%x during diagnose window to 0x%x\n", address, value); return ret; } diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c index 339d89f14d32..eab0ab976af2 100644 --- a/drivers/net/wireless/ath/ath6kl/sdio.c +++ b/drivers/net/wireless/ath/ath6kl/sdio.c @@ -1400,6 +1400,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = { 
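 /* Each entry matches on the common SDIO manufacturer code plus a
  * per-chip device ID; the new 0x2 entry extends the AR6004 range.
  */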
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))}, {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))}, {}, }; diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index c44325856b81..a6a5e40b3e98 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -1229,26 +1229,7 @@ static struct usb_driver ath6kl_usb_driver = { .disable_hub_initiated_lpm = 1, }; -static int ath6kl_usb_init(void) -{ - int ret; - - ret = usb_register(&ath6kl_usb_driver); - if (ret) { - ath6kl_err("usb registration failed: %d\n", ret); - return ret; - } - - return 0; -} - -static void ath6kl_usb_exit(void) -{ - usb_deregister(&ath6kl_usb_driver); -} - -module_init(ath6kl_usb_init); -module_exit(ath6kl_usb_exit); +module_usb_driver(ath6kl_usb_driver); MODULE_AUTHOR("Atheros Communications, Inc."); MODULE_DESCRIPTION("Driver support for Atheros AR600x USB devices"); diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 94df345d08c2..b921005ad7ee 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -619,8 +619,7 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len, dlen, freq, vif->probe_req_report); if (vif->probe_req_report || vif->nw_type == AP_NETWORK) - cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, - GFP_ATOMIC); + cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0); return 0; } @@ -659,7 +658,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len, return -EINVAL; } ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq); - cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, GFP_ATOMIC); + cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0); return 0; } @@ -1093,7 +1092,6 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, u8 *buf; struct ieee80211_channel *channel; struct ath6kl *ar = wmi->parent_dev; - struct ieee80211_mgmt *mgmt; struct cfg80211_bss *bss; if (len <= sizeof(struct wmi_bss_info_hdr2)) @@ -1139,39 +1137,15 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len, } } - /* - * In theory, use of cfg80211_inform_bss() would be more natural here - * since we do not have the full frame. However, at least for now, - * cfg80211 can only distinguish Beacon and Probe Response frames from - * each other when using cfg80211_inform_bss_frame(), so let's build a - * fake IEEE 802.11 header to be able to take benefit of this. 
- */ - mgmt = kmalloc(24 + len, GFP_ATOMIC); - if (mgmt == NULL) - return -EINVAL; - - if (bih->frame_type == BEACON_FTYPE) { - mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | - IEEE80211_STYPE_BEACON); - memset(mgmt->da, 0xff, ETH_ALEN); - } else { - struct net_device *dev = vif->ndev; - - mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | - IEEE80211_STYPE_PROBE_RESP); - memcpy(mgmt->da, dev->dev_addr, ETH_ALEN); - } - mgmt->duration = cpu_to_le16(0); - memcpy(mgmt->sa, bih->bssid, ETH_ALEN); - memcpy(mgmt->bssid, bih->bssid, ETH_ALEN); - mgmt->seq_ctrl = cpu_to_le16(0); - - memcpy(&mgmt->u.beacon, buf, len); - - bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt, - 24 + len, (bih->snr - 95) * 100, - GFP_ATOMIC); - kfree(mgmt); + bss = cfg80211_inform_bss(ar->wiphy, channel, + bih->frame_type == BEACON_FTYPE ? + CFG80211_BSS_FTYPE_BEACON : + CFG80211_BSS_FTYPE_PRESP, + bih->bssid, get_unaligned_le64((__le64 *)buf), + get_unaligned_le16(((__le16 *)buf) + 5), + get_unaligned_le16(((__le16 *)buf) + 4), + buf + 8 + 2 + 2, len - 8 - 2 - 2, + (bih->snr - 95) * 100, GFP_ATOMIC); if (bss == NULL) return -ENOMEM; cfg80211_put_bss(ar->wiphy, bss); diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 8fcc029a76a6..b8f570ed39ad 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -130,6 +130,15 @@ config ATH9K_RFKILL seconds. Turn off to save power, but enable it if you have a platform that can toggle the RF-Kill GPIO. +config ATH9K_CHANNEL_CONTEXT + bool "Channel Context support" + depends on ATH9K + default n + ---help--- + This option enables channel context support in ath9k, which is needed + for multi-channel concurrency. Enable this if P2P PowerSave support + is required. 
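+# A minimal usage sketch, assuming the usual Kconfig workflow: enabling
+# the option above simply sets
+#
+#   CONFIG_ATH9K_CHANNEL_CONTEXT=y
+#
+# in the kernel .config; when it is unset, the chanctx/offchannel state
+# machinery is compiled out and only empty inline stubs remain.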
+ config ATH9K_HTC tristate "Atheros HTC based wireless cards support" depends on USB && MAC80211 diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 7fc13a8da675..c690601f7276 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -31,6 +31,7 @@ #include "spectral.h" struct ath_node; +struct ath_vif; extern struct ieee80211_ops ath9k_ops; extern int ath9k_modparam_nohwcrypt; @@ -324,6 +325,10 @@ struct ath_rx { u32 ampdu_ref; }; +/*******************/ +/* Channel Context */ +/*******************/ + struct ath_chanctx { struct cfg80211_chan_def chandef; struct list_head vifs; @@ -354,7 +359,9 @@ enum ath_chanctx_event { ATH_CHANCTX_EVENT_BEACON_RECEIVED, ATH_CHANCTX_EVENT_ASSOC, ATH_CHANCTX_EVENT_SWITCH, + ATH_CHANCTX_EVENT_ASSIGN, ATH_CHANCTX_EVENT_UNASSIGN, + ATH_CHANCTX_EVENT_CHANGE, ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL, }; @@ -403,35 +410,121 @@ struct ath_offchannel { int roc_duration; int duration; }; + +#define case_rtn_string(val) case val: return #val + #define ath_for_each_chanctx(_sc, _ctx) \ for (ctx = &sc->chanctx[0]; \ ctx <= &sc->chanctx[ARRAY_SIZE(sc->chanctx) - 1]; \ ctx++) -void ath9k_fill_chanctx_ops(void); -void ath9k_chanctx_force_active(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); +void ath_chanctx_init(struct ath_softc *sc); +void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx, + struct cfg80211_chan_def *chandef); + +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + static inline struct ath_chanctx * ath_chanctx_get(struct ieee80211_chanctx_conf *ctx) { struct ath_chanctx **ptr = (void *) ctx->drv_priv; return *ptr; } -void ath_chanctx_init(struct ath_softc *sc); -void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx, - struct cfg80211_chan_def *chandef); -void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx, - struct cfg80211_chan_def *chandef); + +bool ath9k_is_chanctx_enabled(void); +void ath9k_fill_chanctx_ops(void); +void ath9k_init_channel_context(struct ath_softc *sc); +void ath9k_offchannel_init(struct ath_softc *sc); +void ath9k_deinit_channel_context(struct ath_softc *sc); +int ath9k_init_p2p(struct ath_softc *sc); +void ath9k_deinit_p2p(struct ath_softc *sc); +void ath9k_p2p_remove_vif(struct ath_softc *sc, + struct ieee80211_vif *vif); +void ath9k_p2p_beacon_sync(struct ath_softc *sc); +void ath9k_p2p_bss_info_changed(struct ath_softc *sc, + struct ieee80211_vif *vif); +void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp, + struct sk_buff *skb); +void ath9k_p2p_ps_timer(void *priv); +void ath9k_chanctx_wake_queues(struct ath_softc *sc); void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx); -void ath_offchannel_timer(unsigned long data); -void ath_offchannel_channel_change(struct ath_softc *sc); -void ath_chanctx_offchan_switch(struct ath_softc *sc, - struct ieee80211_channel *chan); -struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc, - bool active); + +void ath_chanctx_beacon_recv_ev(struct ath_softc *sc, u32 ts, + enum ath_chanctx_event ev); +void ath_chanctx_beacon_sent_ev(struct ath_softc *sc, + enum ath_chanctx_event ev); void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, enum ath_chanctx_event ev); -void ath_chanctx_timer(unsigned long data); +void ath_chanctx_set_next(struct ath_softc *sc, bool force); +void ath_offchannel_next(struct ath_softc *sc); +void ath_scan_complete(struct ath_softc *sc, bool abort); +void 
ath_roc_complete(struct ath_softc *sc, bool abort); + +#else + +static inline bool ath9k_is_chanctx_enabled(void) +{ + return false; +} +static inline void ath9k_fill_chanctx_ops(void) +{ +} +static inline void ath9k_init_channel_context(struct ath_softc *sc) +{ +} +static inline void ath9k_offchannel_init(struct ath_softc *sc) +{ +} +static inline void ath9k_deinit_channel_context(struct ath_softc *sc) +{ +} +static inline void ath_chanctx_beacon_recv_ev(struct ath_softc *sc, u32 ts, + enum ath_chanctx_event ev) +{ +} +static inline void ath_chanctx_beacon_sent_ev(struct ath_softc *sc, + enum ath_chanctx_event ev) +{ +} +static inline void ath_chanctx_event(struct ath_softc *sc, + struct ieee80211_vif *vif, + enum ath_chanctx_event ev) +{ +} +static inline int ath9k_init_p2p(struct ath_softc *sc) +{ + return 0; +} +static inline void ath9k_deinit_p2p(struct ath_softc *sc) +{ +} +static inline void ath9k_p2p_remove_vif(struct ath_softc *sc, + struct ieee80211_vif *vif) +{ +} +static inline void ath9k_p2p_beacon_sync(struct ath_softc *sc) +{ +} +static inline void ath9k_p2p_bss_info_changed(struct ath_softc *sc, + struct ieee80211_vif *vif) +{ +} +static inline void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp, + struct sk_buff *skb) +{ +} +static inline void ath9k_p2p_ps_timer(struct ath_softc *sc) +{ +} +static inline void ath9k_chanctx_wake_queues(struct ath_softc *sc) +{ +} +static inline void ath_chanctx_check_active(struct ath_softc *sc, + struct ath_chanctx *ctx) +{ +} + +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan); int ath_startrecv(struct ath_softc *sc); @@ -583,7 +676,6 @@ void ath9k_csa_update(struct ath_softc *sc); #define ATH_PAPRD_TIMEOUT 100 /* msecs */ #define ATH_PLL_WORK_INTERVAL 100 -void ath_chanctx_work(struct work_struct *work); void ath_tx_complete_poll_work(struct work_struct *work); void ath_reset_work(struct work_struct *work); bool ath_hw_check(struct ath_softc *sc); @@ -597,8 +689,6 @@ int ath_update_survey_stats(struct ath_softc *sc); void ath_update_survey_nf(struct ath_softc *sc, int channel); void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type); void ath_ps_full_sleep(unsigned long data); -void ath9k_p2p_ps_timer(void *priv); -void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif); void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop); /**********/ @@ -849,12 +939,17 @@ struct ath_softc { struct mutex mutex; struct work_struct paprd_work; struct work_struct hw_reset_work; - struct work_struct chanctx_work; struct completion paprd_complete; wait_queue_head_t tx_wait; +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + struct work_struct chanctx_work; struct ath_gen_timer *p2p_ps_timer; struct ath_vif *p2p_ps_vif; + struct ath_chanctx_sched sched; + struct ath_offchannel offchannel; + struct ath_chanctx *next_chan; +#endif unsigned long driver_data; @@ -875,10 +970,7 @@ struct ath_softc { struct cfg80211_chan_def cur_chandef; struct ath_chanctx chanctx[ATH9K_NUM_CHANCTX]; struct ath_chanctx *cur_chan; - struct ath_chanctx *next_chan; spinlock_t chan_lock; - struct ath_offchannel offchannel; - struct ath_chanctx_sched sched; #ifdef CONFIG_MAC80211_LEDS bool led_registered; diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index eaf8f058c151..b2f56d8b9043 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -108,55 +108,6 @@ static void 
ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif, ath9k_hw_set_txdesc(ah, bf->bf_desc, &info); } -static void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp, - struct sk_buff *skb) -{ - static const u8 noa_ie_hdr[] = { - WLAN_EID_VENDOR_SPECIFIC, /* type */ - 0, /* length */ - 0x50, 0x6f, 0x9a, /* WFA OUI */ - 0x09, /* P2P subtype */ - 0x0c, /* Notice of Absence */ - 0x00, /* LSB of little-endian len */ - 0x00, /* MSB of little-endian len */ - }; - - struct ieee80211_p2p_noa_attr *noa; - int noa_len, noa_desc, i = 0; - u8 *hdr; - - if (!avp->offchannel_duration && !avp->periodic_noa_duration) - return; - - noa_desc = !!avp->offchannel_duration + !!avp->periodic_noa_duration; - noa_len = 2 + sizeof(struct ieee80211_p2p_noa_desc) * noa_desc; - - hdr = skb_put(skb, sizeof(noa_ie_hdr)); - memcpy(hdr, noa_ie_hdr, sizeof(noa_ie_hdr)); - hdr[1] = sizeof(noa_ie_hdr) + noa_len - 2; - hdr[7] = noa_len; - - noa = (void *) skb_put(skb, noa_len); - memset(noa, 0, noa_len); - - noa->index = avp->noa_index; - if (avp->periodic_noa_duration) { - u32 interval = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval); - - noa->desc[i].count = 255; - noa->desc[i].start_time = cpu_to_le32(avp->periodic_noa_start); - noa->desc[i].duration = cpu_to_le32(avp->periodic_noa_duration); - noa->desc[i].interval = cpu_to_le32(interval); - i++; - } - - if (avp->offchannel_duration) { - noa->desc[i].count = 1; - noa->desc[i].start_time = cpu_to_le32(avp->offchannel_start); - noa->desc[i].duration = cpu_to_le32(avp->offchannel_duration); - } -} - static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -427,9 +378,10 @@ void ath9k_beacon_tasklet(unsigned long data) /* EDMA devices check that in the tx completion function. 
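 * In other words, the chanctx beacon-sent notification below runs from
 * this tasklet only on non-EDMA chips; EDMA hardware raises it from its
 * TX completion handler instead.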
*/ if (!edma) { - if (sc->sched.beacon_pending) - ath_chanctx_event(sc, NULL, + if (ath9k_is_chanctx_enabled()) { + ath_chanctx_beacon_sent_ev(sc, ATH_CHANCTX_EVENT_BEACON_SENT); + } if (ath9k_csa_is_finished(sc, vif)) return; @@ -438,7 +390,10 @@ void ath9k_beacon_tasklet(unsigned long data) if (!vif || !vif->bss_conf.enable_beacon) return; - ath_chanctx_event(sc, vif, ATH_CHANCTX_EVENT_BEACON_PREPARE); + if (ath9k_is_chanctx_enabled()) { + ath_chanctx_event(sc, vif, ATH_CHANCTX_EVENT_BEACON_PREPARE); + } + bf = ath9k_beacon_generate(sc->hw, vif); if (sc->beacon.bmisscnt != 0) { diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index ba214ebdcd16..409f912a67c7 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -101,202 +101,6 @@ static int ath_set_channel(struct ath_softc *sc) return 0; } -static bool -ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, - bool powersave) -{ - struct ieee80211_vif *vif = avp->vif; - struct ieee80211_sta *sta = NULL; - struct ieee80211_hdr_3addr *nullfunc; - struct ath_tx_control txctl; - struct sk_buff *skb; - int band = sc->cur_chan->chandef.chan->band; - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - if (!vif->bss_conf.assoc) - return false; - - skb = ieee80211_nullfunc_get(sc->hw, vif); - if (!skb) - return false; - - nullfunc = (struct ieee80211_hdr_3addr *) skb->data; - if (powersave) - nullfunc->frame_control |= - cpu_to_le16(IEEE80211_FCTL_PM); - - skb_set_queue_mapping(skb, IEEE80211_AC_VO); - if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) { - dev_kfree_skb_any(skb); - return false; - } - break; - default: - return false; - } - - memset(&txctl, 0, sizeof(txctl)); - txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; - txctl.sta = sta; - txctl.force_channel = true; - if (ath_tx_start(sc->hw, skb, &txctl)) { - ieee80211_free_txskb(sc->hw, skb); - return false; - } - - return true; -} - -void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_vif *avp; - bool active = false; - u8 n_active = 0; - - if (!ctx) - return; - - list_for_each_entry(avp, &ctx->vifs, list) { - struct ieee80211_vif *vif = avp->vif; - - switch (vif->type) { - case NL80211_IFTYPE_P2P_CLIENT: - case NL80211_IFTYPE_STATION: - if (vif->bss_conf.assoc) - active = true; - break; - default: - active = true; - break; - } - } - ctx->active = active; - - ath_for_each_chanctx(sc, ctx) { - if (!ctx->assigned || list_empty(&ctx->vifs)) - continue; - n_active++; - } - - if (n_active <= 1) { - clear_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags); - return; - } - if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) - return; - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL); -} - -static bool -ath_chanctx_send_ps_frame(struct ath_softc *sc, bool powersave) -{ - struct ath_vif *avp; - bool sent = false; - - rcu_read_lock(); - list_for_each_entry(avp, &sc->cur_chan->vifs, list) { - if (ath_chanctx_send_vif_ps_frame(sc, avp, powersave)) - sent = true; - } - rcu_read_unlock(); - - return sent; -} - -static bool ath_chanctx_defer_switch(struct ath_softc *sc) -{ - if (sc->cur_chan == &sc->offchannel.chan) - return false; - - switch (sc->sched.state) { - case ATH_CHANCTX_STATE_SWITCH: - return false; - case ATH_CHANCTX_STATE_IDLE: - if (!sc->cur_chan->switch_after_beacon) - return false; - - sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; - 
break; - default: - break; - } - - return true; -} - -static void ath_chanctx_set_next(struct ath_softc *sc, bool force) -{ - struct timespec ts; - bool measure_time = false; - bool send_ps = false; - - spin_lock_bh(&sc->chan_lock); - if (!sc->next_chan) { - spin_unlock_bh(&sc->chan_lock); - return; - } - - if (!force && ath_chanctx_defer_switch(sc)) { - spin_unlock_bh(&sc->chan_lock); - return; - } - - if (sc->cur_chan != sc->next_chan) { - sc->cur_chan->stopped = true; - spin_unlock_bh(&sc->chan_lock); - - if (sc->next_chan == &sc->offchannel.chan) { - getrawmonotonic(&ts); - measure_time = true; - } - __ath9k_flush(sc->hw, ~0, true); - - if (ath_chanctx_send_ps_frame(sc, true)) - __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), false); - - send_ps = true; - spin_lock_bh(&sc->chan_lock); - - if (sc->cur_chan != &sc->offchannel.chan) { - getrawmonotonic(&sc->cur_chan->tsf_ts); - sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah); - } - } - sc->cur_chan = sc->next_chan; - sc->cur_chan->stopped = false; - sc->next_chan = NULL; - sc->sched.offchannel_duration = 0; - if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE) - sc->sched.state = ATH_CHANCTX_STATE_IDLE; - - spin_unlock_bh(&sc->chan_lock); - - if (sc->sc_ah->chip_fullsleep || - memcmp(&sc->cur_chandef, &sc->cur_chan->chandef, - sizeof(sc->cur_chandef))) { - ath_set_channel(sc); - if (measure_time) - sc->sched.channel_switch_time = - ath9k_hw_get_tsf_offset(&ts, NULL); - } - if (send_ps) - ath_chanctx_send_ps_frame(sc, false); - - ath_offchannel_channel_change(sc); - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_SWITCH); -} - -void ath_chanctx_work(struct work_struct *work) -{ - struct ath_softc *sc = container_of(work, struct ath_softc, - chanctx_work); - mutex_lock(&sc->mutex); - ath_chanctx_set_next(sc, false); - mutex_unlock(&sc->mutex); -} - void ath_chanctx_init(struct ath_softc *sc) { struct ath_chanctx *ctx; @@ -318,115 +122,124 @@ void ath_chanctx_init(struct ath_softc *sc) for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) INIT_LIST_HEAD(&ctx->acq[j]); } - ctx = &sc->offchannel.chan; - cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20); - INIT_LIST_HEAD(&ctx->vifs); - ctx->txpower = ATH_TXPOWER_MAX; - for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) - INIT_LIST_HEAD(&ctx->acq[j]); - sc->offchannel.chan.offchannel = true; - } -void ath9k_chanctx_force_active(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx, + struct cfg80211_chan_def *chandef) { - struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_vif *avp = (struct ath_vif *) vif->drv_priv; - bool changed = false; - - if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) - return; - - if (!avp->chanctx) - return; - - mutex_lock(&sc->mutex); + bool cur_chan; spin_lock_bh(&sc->chan_lock); - if (sc->next_chan || (sc->cur_chan != avp->chanctx)) { - sc->next_chan = avp->chanctx; - changed = true; - } - sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE; + if (chandef) + memcpy(&ctx->chandef, chandef, sizeof(*chandef)); + cur_chan = sc->cur_chan == ctx; spin_unlock_bh(&sc->chan_lock); - if (changed) - ath_chanctx_set_next(sc, true); + if (!cur_chan) { + ath_dbg(common, CHAN_CTX, + "Current context differs from the new context\n"); + return; + } - mutex_unlock(&sc->mutex); + ath_set_channel(sc); } -void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx, - struct cfg80211_chan_def *chandef) -{ - struct ath_common *common = 
ath9k_hw_common(sc->sc_ah); +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT - spin_lock_bh(&sc->chan_lock); +/**********************************************************/ +/* Functions to handle the channel context state machine. */ +/**********************************************************/ - if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) && - (sc->cur_chan != ctx) && (ctx == &sc->offchannel.chan)) { - sc->sched.offchannel_pending = true; - spin_unlock_bh(&sc->chan_lock); - return; +static const char *offchannel_state_string(enum ath_offchannel_state state) +{ + switch (state) { + case_rtn_string(ATH_OFFCHANNEL_IDLE); + case_rtn_string(ATH_OFFCHANNEL_PROBE_SEND); + case_rtn_string(ATH_OFFCHANNEL_PROBE_WAIT); + case_rtn_string(ATH_OFFCHANNEL_SUSPEND); + case_rtn_string(ATH_OFFCHANNEL_ROC_START); + case_rtn_string(ATH_OFFCHANNEL_ROC_WAIT); + case_rtn_string(ATH_OFFCHANNEL_ROC_DONE); + default: + return "unknown"; } +} - sc->next_chan = ctx; - if (chandef) - ctx->chandef = *chandef; - - if (sc->next_chan == &sc->offchannel.chan) { - sc->sched.offchannel_duration = - TU_TO_USEC(sc->offchannel.duration) + - sc->sched.channel_switch_time; +static const char *chanctx_event_string(enum ath_chanctx_event ev) +{ + switch (ev) { + case_rtn_string(ATH_CHANCTX_EVENT_BEACON_PREPARE); + case_rtn_string(ATH_CHANCTX_EVENT_BEACON_SENT); + case_rtn_string(ATH_CHANCTX_EVENT_TSF_TIMER); + case_rtn_string(ATH_CHANCTX_EVENT_BEACON_RECEIVED); + case_rtn_string(ATH_CHANCTX_EVENT_ASSOC); + case_rtn_string(ATH_CHANCTX_EVENT_SWITCH); + case_rtn_string(ATH_CHANCTX_EVENT_ASSIGN); + case_rtn_string(ATH_CHANCTX_EVENT_UNASSIGN); + case_rtn_string(ATH_CHANCTX_EVENT_CHANGE); + case_rtn_string(ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL); + default: + return "unknown"; } - spin_unlock_bh(&sc->chan_lock); - ieee80211_queue_work(sc->hw, &sc->chanctx_work); } -void ath_chanctx_set_channel(struct ath_softc *sc, struct ath_chanctx *ctx, - struct cfg80211_chan_def *chandef) +static const char *chanctx_state_string(enum ath_chanctx_state state) { - bool cur_chan; + switch (state) { + case_rtn_string(ATH_CHANCTX_STATE_IDLE); + case_rtn_string(ATH_CHANCTX_STATE_WAIT_FOR_BEACON); + case_rtn_string(ATH_CHANCTX_STATE_WAIT_FOR_TIMER); + case_rtn_string(ATH_CHANCTX_STATE_SWITCH); + case_rtn_string(ATH_CHANCTX_STATE_FORCE_ACTIVE); + default: + return "unknown"; + } +} - spin_lock_bh(&sc->chan_lock); - if (chandef) - memcpy(&ctx->chandef, chandef, sizeof(*chandef)); - cur_chan = sc->cur_chan == ctx; - spin_unlock_bh(&sc->chan_lock); +void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_vif *avp; + bool active = false; + u8 n_active = 0; - if (!cur_chan) + if (!ctx) return; - ath_set_channel(sc); -} + list_for_each_entry(avp, &ctx->vifs, list) { + struct ieee80211_vif *vif = avp->vif; -struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc, bool active) -{ - struct ath_chanctx *ctx; + switch (vif->type) { + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_STATION: + if (vif->bss_conf.assoc) + active = true; + break; + default: + active = true; + break; + } + } + ctx->active = active; ath_for_each_chanctx(sc, ctx) { if (!ctx->assigned || list_empty(&ctx->vifs)) continue; - if (active && !ctx->active) - continue; - - if (ctx->switch_after_beacon) - return ctx; + n_active++; } - return &sc->chanctx[0]; -} - -void ath_chanctx_offchan_switch(struct ath_softc *sc, - struct ieee80211_channel *chan) -{ - struct cfg80211_chan_def chandef; - - 
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); + if (n_active <= 1) { + clear_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags); + return; + } + if (test_and_set_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) + return; - ath_chanctx_switch(sc, &sc->offchannel.chan, &chandef); + if (ath9k_is_chanctx_enabled()) { + ath_chanctx_event(sc, NULL, + ATH_CHANCTX_EVENT_ENABLE_MULTICHANNEL); + } } static struct ath_chanctx * @@ -469,25 +282,22 @@ static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc) prev->tsf_val += offset; } -void ath_chanctx_timer(unsigned long data) -{ - struct ath_softc *sc = (struct ath_softc *) data; - - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER); -} - /* Configure the TSF based hardware timer for a channel switch. * Also set up backup software timer, in case the gen timer fails. * This could be caused by a hardware reset. */ static void ath_chanctx_setup_timer(struct ath_softc *sc, u32 tsf_time) { + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_hw *ah = sc->sc_ah; ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, tsf_time, 1000000); tsf_time -= ath9k_hw_gettsf32(ah); tsf_time = msecs_to_jiffies(tsf_time / 1000) + 1; - mod_timer(&sc->sched.timer, tsf_time); + mod_timer(&sc->sched.timer, jiffies + tsf_time); + + ath_dbg(common, CHAN_CTX, + "Setup chanctx timer with timeout: %d ms\n", jiffies_to_msecs(tsf_time)); } void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, @@ -507,33 +317,50 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, spin_lock_bh(&sc->chan_lock); + ath_dbg(common, CHAN_CTX, "cur_chan: %d MHz, event: %s, state: %s\n", + sc->cur_chan->chandef.center_freq1, + chanctx_event_string(ev), + chanctx_state_string(sc->sched.state)); + switch (ev) { case ATH_CHANCTX_EVENT_BEACON_PREPARE: if (avp->offchannel_duration) avp->offchannel_duration = 0; - if (avp->chanctx != sc->cur_chan) + if (avp->chanctx != sc->cur_chan) { + ath_dbg(common, CHAN_CTX, + "Contexts differ, not preparing beacon\n"); break; + } if (sc->sched.offchannel_pending) { sc->sched.offchannel_pending = false; sc->next_chan = &sc->offchannel.chan; sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; + ath_dbg(common, CHAN_CTX, + "Setting offchannel_pending to false\n"); } ctx = ath_chanctx_get_next(sc, sc->cur_chan); if (ctx->active && sc->sched.state == ATH_CHANCTX_STATE_IDLE) { sc->next_chan = ctx; sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; + ath_dbg(common, CHAN_CTX, + "Set next context, move chanctx state to WAIT_FOR_BEACON\n"); } /* if the timer missed its window, use the next interval */ - if (sc->sched.state == ATH_CHANCTX_STATE_WAIT_FOR_TIMER) + if (sc->sched.state == ATH_CHANCTX_STATE_WAIT_FOR_TIMER) { sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; + ath_dbg(common, CHAN_CTX, + "Move chanctx state from WAIT_FOR_TIMER to WAIT_FOR_BEACON\n"); + } if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) break; + ath_dbg(common, CHAN_CTX, "Preparing beacon for vif: %pM\n", vif->addr); + sc->sched.beacon_pending = true; sc->sched.next_tbtt = REG_READ(ah, AR_NEXT_TBTT_TIMER); @@ -577,15 +404,28 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, if (noa_changed) avp->noa_index++; + + ath_dbg(common, CHAN_CTX, + "periodic_noa_duration: %d, periodic_noa_start: %d, noa_index: %d\n", + avp->periodic_noa_duration, + avp->periodic_noa_start, + avp->noa_index); + break; case ATH_CHANCTX_EVENT_BEACON_SENT: - if (!sc->sched.beacon_pending) + if (!sc->sched.beacon_pending) { + 
ath_dbg(common, CHAN_CTX, + "No pending beacon\n"); break; + } sc->sched.beacon_pending = false; if (sc->sched.state != ATH_CHANCTX_STATE_WAIT_FOR_BEACON) break; + ath_dbg(common, CHAN_CTX, + "Move chanctx state to WAIT_FOR_TIMER\n"); + sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER; ath_chanctx_setup_timer(sc, sc->sched.switch_start_time); break; @@ -597,6 +437,9 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, sc->sched.beacon_pending) sc->sched.beacon_miss++; + ath_dbg(common, CHAN_CTX, + "Move chanctx state to SWITCH\n"); + sc->sched.state = ATH_CHANCTX_STATE_SWITCH; ieee80211_queue_work(sc->hw, &sc->chanctx_work); break; @@ -625,6 +468,9 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, avp->chanctx != sc->cur_chan) break; + ath_dbg(common, CHAN_CTX, + "Move chanctx state from FORCE_ACTIVE to IDLE\n"); + sc->sched.state = ATH_CHANCTX_STATE_IDLE; /* fall through */ case ATH_CHANCTX_EVENT_SWITCH: @@ -640,6 +486,9 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, sc->next_chan = ath_chanctx_get_next(sc, sc->cur_chan); cur_conf = &sc->cur_chan->beacon; + ath_dbg(common, CHAN_CTX, + "Move chanctx state to WAIT_FOR_TIMER (event SWITCH)\n"); + sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_TIMER; tsf_time = TU_TO_USEC(cur_conf->beacon_interval) / 2; @@ -679,7 +528,785 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif, sc->next_chan = ctx; ieee80211_queue_work(sc->hw, &sc->chanctx_work); break; + case ATH_CHANCTX_EVENT_ASSIGN: + /* + * When adding a new channel context, check if a scan + * is in progress and abort it since the addition of + * a new channel context is usually followed by VIF + * assignment, in which case we have to start multi-channel + * operation. 
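+ * The del_timer_sync() below therefore stops any pending offchannel
+ * timer first (with chan_lock temporarily dropped), and the scan is
+ * reported to mac80211 as aborted via ath_scan_complete().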
+ */ + if (test_bit(ATH_OP_SCANNING, &common->op_flags)) { + ath_dbg(common, CHAN_CTX, + "Aborting HW scan to add new context\n"); + + spin_unlock_bh(&sc->chan_lock); + del_timer_sync(&sc->offchannel.timer); + ath_scan_complete(sc, true); + spin_lock_bh(&sc->chan_lock); + } + break; + case ATH_CHANCTX_EVENT_CHANGE: + break; + } + + spin_unlock_bh(&sc->chan_lock); +} + +void ath_chanctx_beacon_sent_ev(struct ath_softc *sc, + enum ath_chanctx_event ev) +{ + if (sc->sched.beacon_pending) + ath_chanctx_event(sc, NULL, ev); +} + +void ath_chanctx_beacon_recv_ev(struct ath_softc *sc, u32 ts, + enum ath_chanctx_event ev) +{ + sc->sched.next_tbtt = ts; + ath_chanctx_event(sc, NULL, ev); +} + +static int ath_scan_channel_duration(struct ath_softc *sc, + struct ieee80211_channel *chan) +{ + struct cfg80211_scan_request *req = sc->offchannel.scan_req; + + if (!req->n_ssids || (chan->flags & IEEE80211_CHAN_NO_IR)) + return (HZ / 9); /* ~110 ms */ + + return (HZ / 16); /* ~60 ms */ +} + +static void ath_chanctx_switch(struct ath_softc *sc, struct ath_chanctx *ctx, + struct cfg80211_chan_def *chandef) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + spin_lock_bh(&sc->chan_lock); + + if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) && + (sc->cur_chan != ctx) && (ctx == &sc->offchannel.chan)) { + sc->sched.offchannel_pending = true; + spin_unlock_bh(&sc->chan_lock); + return; + } + + sc->next_chan = ctx; + if (chandef) { + ctx->chandef = *chandef; + ath_dbg(common, CHAN_CTX, + "Assigned next_chan to %d MHz\n", chandef->center_freq1); + } + + if (sc->next_chan == &sc->offchannel.chan) { + sc->sched.offchannel_duration = + TU_TO_USEC(sc->offchannel.duration) + + sc->sched.channel_switch_time; + + if (chandef) { + ath_dbg(common, CHAN_CTX, + "Offchannel duration for chan %d MHz : %u\n", + chandef->center_freq1, + sc->sched.offchannel_duration); + } + } + spin_unlock_bh(&sc->chan_lock); + ieee80211_queue_work(sc->hw, &sc->chanctx_work); +} + +static void ath_chanctx_offchan_switch(struct ath_softc *sc, + struct ieee80211_channel *chan) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct cfg80211_chan_def chandef; + + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); + ath_dbg(common, CHAN_CTX, + "Channel definition created: %d MHz\n", chandef.center_freq1); + + ath_chanctx_switch(sc, &sc->offchannel.chan, &chandef); +} + +static struct ath_chanctx *ath_chanctx_get_oper_chan(struct ath_softc *sc, + bool active) +{ + struct ath_chanctx *ctx; + + ath_for_each_chanctx(sc, ctx) { + if (!ctx->assigned || list_empty(&ctx->vifs)) + continue; + if (active && !ctx->active) + continue; + + if (ctx->switch_after_beacon) + return ctx; + } + + return &sc->chanctx[0]; +} + +static void +ath_scan_next_channel(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct cfg80211_scan_request *req = sc->offchannel.scan_req; + struct ieee80211_channel *chan; + + if (sc->offchannel.scan_idx >= req->n_channels) { + ath_dbg(common, CHAN_CTX, + "Moving offchannel state to ATH_OFFCHANNEL_IDLE, " + "scan_idx: %d, n_channels: %d\n", + sc->offchannel.scan_idx, + req->n_channels); + + sc->offchannel.state = ATH_OFFCHANNEL_IDLE; + ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false), + NULL); + return; + } + + ath_dbg(common, CHAN_CTX, + "Moving offchannel state to ATH_OFFCHANNEL_PROBE_SEND, scan_idx: %d\n", + sc->offchannel.scan_idx); + + chan = req->channels[sc->offchannel.scan_idx++]; + sc->offchannel.duration = ath_scan_channel_duration(sc, chan); 
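+ /* Dwell time, in jiffies: ath_scan_channel_duration() picks roughly
+  * 110 ms (HZ / 9) when the channel cannot be probed actively (no
+  * SSIDs in the request, or IEEE80211_CHAN_NO_IR set) and roughly
+  * 60 ms (HZ / 16) otherwise; ath_scan_channel_start() later arms
+  * the offchannel timer with exactly this duration.
+  */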
+ sc->offchannel.state = ATH_OFFCHANNEL_PROBE_SEND; + + ath_chanctx_offchan_switch(sc, chan); +} + +void ath_offchannel_next(struct ath_softc *sc) +{ + struct ieee80211_vif *vif; + + if (sc->offchannel.scan_req) { + vif = sc->offchannel.scan_vif; + sc->offchannel.chan.txpower = vif->bss_conf.txpower; + ath_scan_next_channel(sc); + } else if (sc->offchannel.roc_vif) { + vif = sc->offchannel.roc_vif; + sc->offchannel.chan.txpower = vif->bss_conf.txpower; + sc->offchannel.duration = sc->offchannel.roc_duration; + sc->offchannel.state = ATH_OFFCHANNEL_ROC_START; + ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan); + } else { + ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false), + NULL); + sc->offchannel.state = ATH_OFFCHANNEL_IDLE; + if (sc->ps_idle) + ath_cancel_work(sc); + } +} + +void ath_roc_complete(struct ath_softc *sc, bool abort) +{ + sc->offchannel.roc_vif = NULL; + sc->offchannel.roc_chan = NULL; + if (!abort) + ieee80211_remain_on_channel_expired(sc->hw); + ath_offchannel_next(sc); + ath9k_ps_restore(sc); +} + +void ath_scan_complete(struct ath_softc *sc, bool abort) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + if (abort) + ath_dbg(common, CHAN_CTX, "HW scan aborted\n"); + else + ath_dbg(common, CHAN_CTX, "HW scan complete\n"); + + sc->offchannel.scan_req = NULL; + sc->offchannel.scan_vif = NULL; + sc->offchannel.state = ATH_OFFCHANNEL_IDLE; + ieee80211_scan_completed(sc->hw, abort); + clear_bit(ATH_OP_SCANNING, &common->op_flags); + ath_offchannel_next(sc); + ath9k_ps_restore(sc); +} + +static void ath_scan_send_probe(struct ath_softc *sc, + struct cfg80211_ssid *ssid) +{ + struct cfg80211_scan_request *req = sc->offchannel.scan_req; + struct ieee80211_vif *vif = sc->offchannel.scan_vif; + struct ath_tx_control txctl = {}; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + int band = sc->offchannel.chan.chandef.chan->band; + + skb = ieee80211_probereq_get(sc->hw, vif, + ssid->ssid, ssid->ssid_len, req->ie_len); + if (!skb) + return; + + info = IEEE80211_SKB_CB(skb); + if (req->no_cck) + info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE; + + if (req->ie_len) + memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len); + + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + + if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL)) + goto error; + + txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; + txctl.force_channel = true; + if (ath_tx_start(sc->hw, skb, &txctl)) + goto error; + + return; + +error: + ieee80211_free_txskb(sc->hw, skb); +} + +static void ath_scan_channel_start(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct cfg80211_scan_request *req = sc->offchannel.scan_req; + int i; + + if (!(sc->cur_chan->chandef.chan->flags & IEEE80211_CHAN_NO_IR) && + req->n_ssids) { + for (i = 0; i < req->n_ssids; i++) + ath_scan_send_probe(sc, &req->ssids[i]); + + } + + ath_dbg(common, CHAN_CTX, + "Moving offchannel state to ATH_OFFCHANNEL_PROBE_WAIT\n"); + + sc->offchannel.state = ATH_OFFCHANNEL_PROBE_WAIT; + mod_timer(&sc->offchannel.timer, jiffies + sc->offchannel.duration); +} + +static void ath_chanctx_timer(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *) data; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + ath_dbg(common, CHAN_CTX, + "Channel context timer invoked\n"); + + ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER); +} + +static void ath_offchannel_timer(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *)data; + struct ath_chanctx *ctx; + struct 
ath_common *common = ath9k_hw_common(sc->sc_ah); + + ath_dbg(common, CHAN_CTX, "%s: offchannel state: %s\n", + __func__, offchannel_state_string(sc->offchannel.state)); + + switch (sc->offchannel.state) { + case ATH_OFFCHANNEL_PROBE_WAIT: + if (!sc->offchannel.scan_req) + return; + + /* get first active channel context */ + ctx = ath_chanctx_get_oper_chan(sc, true); + if (ctx->active) { + ath_dbg(common, CHAN_CTX, + "Switch to oper/active context, " + "move offchannel state to ATH_OFFCHANNEL_SUSPEND\n"); + + sc->offchannel.state = ATH_OFFCHANNEL_SUSPEND; + ath_chanctx_switch(sc, ctx, NULL); + mod_timer(&sc->offchannel.timer, jiffies + HZ / 10); + break; + } + /* fall through */ + case ATH_OFFCHANNEL_SUSPEND: + if (!sc->offchannel.scan_req) + return; + + ath_scan_next_channel(sc); + break; + case ATH_OFFCHANNEL_ROC_START: + case ATH_OFFCHANNEL_ROC_WAIT: + ctx = ath_chanctx_get_oper_chan(sc, false); + sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE; + ath_chanctx_switch(sc, ctx, NULL); + break; + default: + break; + } +} + +static bool +ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, + bool powersave) +{ + struct ieee80211_vif *vif = avp->vif; + struct ieee80211_sta *sta = NULL; + struct ieee80211_hdr_3addr *nullfunc; + struct ath_tx_control txctl; + struct sk_buff *skb; + int band = sc->cur_chan->chandef.chan->band; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + if (!vif->bss_conf.assoc) + return false; + + skb = ieee80211_nullfunc_get(sc->hw, vif); + if (!skb) + return false; + + nullfunc = (struct ieee80211_hdr_3addr *) skb->data; + if (powersave) + nullfunc->frame_control |= + cpu_to_le16(IEEE80211_FCTL_PM); + + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) { + dev_kfree_skb_any(skb); + return false; + } + break; + default: + return false; + } + + memset(&txctl, 0, sizeof(txctl)); + txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; + txctl.sta = sta; + txctl.force_channel = true; + if (ath_tx_start(sc->hw, skb, &txctl)) { + ieee80211_free_txskb(sc->hw, skb); + return false; + } + + return true; +} + +static bool +ath_chanctx_send_ps_frame(struct ath_softc *sc, bool powersave) +{ + struct ath_vif *avp; + bool sent = false; + + rcu_read_lock(); + list_for_each_entry(avp, &sc->cur_chan->vifs, list) { + if (ath_chanctx_send_vif_ps_frame(sc, avp, powersave)) + sent = true; + } + rcu_read_unlock(); + + return sent; +} + +static bool ath_chanctx_defer_switch(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + if (sc->cur_chan == &sc->offchannel.chan) + return false; + + switch (sc->sched.state) { + case ATH_CHANCTX_STATE_SWITCH: + return false; + case ATH_CHANCTX_STATE_IDLE: + if (!sc->cur_chan->switch_after_beacon) + return false; + + ath_dbg(common, CHAN_CTX, + "Defer switch, set chanctx state to WAIT_FOR_BEACON\n"); + + sc->sched.state = ATH_CHANCTX_STATE_WAIT_FOR_BEACON; + break; + default: + break; + } + + return true; +} + +static void ath_offchannel_channel_change(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + ath_dbg(common, CHAN_CTX, "%s: offchannel state: %s\n", + __func__, offchannel_state_string(sc->offchannel.state)); + + switch (sc->offchannel.state) { + case ATH_OFFCHANNEL_PROBE_SEND: + if (!sc->offchannel.scan_req) + return; + + if (sc->cur_chan->chandef.chan != + sc->offchannel.chan.chandef.chan) + return; + + ath_scan_channel_start(sc); + break; + case ATH_OFFCHANNEL_IDLE: + if (!sc->offchannel.scan_req) + 
return; + + ath_scan_complete(sc, false); + break; + case ATH_OFFCHANNEL_ROC_START: + if (sc->cur_chan != &sc->offchannel.chan) + break; + + sc->offchannel.state = ATH_OFFCHANNEL_ROC_WAIT; + mod_timer(&sc->offchannel.timer, jiffies + + msecs_to_jiffies(sc->offchannel.duration)); + ieee80211_ready_on_channel(sc->hw); + break; + case ATH_OFFCHANNEL_ROC_DONE: + ath_roc_complete(sc, false); + break; + default: + break; + } +} + +void ath_chanctx_set_next(struct ath_softc *sc, bool force) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct timespec ts; + bool measure_time = false; + bool send_ps = false; + + spin_lock_bh(&sc->chan_lock); + if (!sc->next_chan) { + spin_unlock_bh(&sc->chan_lock); + return; } + if (!force && ath_chanctx_defer_switch(sc)) { + spin_unlock_bh(&sc->chan_lock); + return; + } + + ath_dbg(common, CHAN_CTX, + "%s: current: %d MHz, next: %d MHz\n", + __func__, + sc->cur_chan->chandef.center_freq1, + sc->next_chan->chandef.center_freq1); + + if (sc->cur_chan != sc->next_chan) { + ath_dbg(common, CHAN_CTX, + "Stopping current chanctx: %d\n", + sc->cur_chan->chandef.center_freq1); + sc->cur_chan->stopped = true; + spin_unlock_bh(&sc->chan_lock); + + if (sc->next_chan == &sc->offchannel.chan) { + getrawmonotonic(&ts); + measure_time = true; + } + __ath9k_flush(sc->hw, ~0, true); + + if (ath_chanctx_send_ps_frame(sc, true)) + __ath9k_flush(sc->hw, BIT(IEEE80211_AC_VO), false); + + send_ps = true; + spin_lock_bh(&sc->chan_lock); + + if (sc->cur_chan != &sc->offchannel.chan) { + getrawmonotonic(&sc->cur_chan->tsf_ts); + sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah); + } + } + sc->cur_chan = sc->next_chan; + sc->cur_chan->stopped = false; + sc->next_chan = NULL; + sc->sched.offchannel_duration = 0; + if (sc->sched.state != ATH_CHANCTX_STATE_FORCE_ACTIVE) + sc->sched.state = ATH_CHANCTX_STATE_IDLE; + spin_unlock_bh(&sc->chan_lock); + + if (sc->sc_ah->chip_fullsleep || + memcmp(&sc->cur_chandef, &sc->cur_chan->chandef, + sizeof(sc->cur_chandef))) { + ath_dbg(common, CHAN_CTX, + "%s: Set channel %d MHz\n", + __func__, sc->cur_chan->chandef.center_freq1); + ath_set_channel(sc); + if (measure_time) + sc->sched.channel_switch_time = + ath9k_hw_get_tsf_offset(&ts, NULL); + } + if (send_ps) + ath_chanctx_send_ps_frame(sc, false); + + ath_offchannel_channel_change(sc); + ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_SWITCH); +} + +static void ath_chanctx_work(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, + chanctx_work); + mutex_lock(&sc->mutex); + ath_chanctx_set_next(sc, false); + mutex_unlock(&sc->mutex); +} + +void ath9k_offchannel_init(struct ath_softc *sc) +{ + struct ath_chanctx *ctx; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + int i; + + sband = &common->sbands[IEEE80211_BAND_2GHZ]; + if (!sband->n_channels) + sband = &common->sbands[IEEE80211_BAND_5GHZ]; + + chan = &sband->channels[0]; + + ctx = &sc->offchannel.chan; + INIT_LIST_HEAD(&ctx->vifs); + ctx->txpower = ATH_TXPOWER_MAX; + cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20); + + for (i = 0; i < ARRAY_SIZE(ctx->acq); i++) + INIT_LIST_HEAD(&ctx->acq[i]); + + sc->offchannel.chan.offchannel = true; +} + +void ath9k_init_channel_context(struct ath_softc *sc) +{ + INIT_WORK(&sc->chanctx_work, ath_chanctx_work); + + setup_timer(&sc->offchannel.timer, ath_offchannel_timer, + (unsigned long)sc); + setup_timer(&sc->sched.timer, ath_chanctx_timer, + (unsigned 
long)sc); +} + +void ath9k_deinit_channel_context(struct ath_softc *sc) +{ + cancel_work_sync(&sc->chanctx_work); +} + +bool ath9k_is_chanctx_enabled(void) +{ + return (ath9k_use_chanctx == 1); +} + +/********************/ +/* Queue management */ +/********************/ + +void ath9k_chanctx_wake_queues(struct ath_softc *sc) +{ + struct ath_hw *ah = sc->sc_ah; + int i; + + if (sc->cur_chan == &sc->offchannel.chan) { + ieee80211_wake_queue(sc->hw, + sc->hw->offchannel_tx_hw_queue); + } else { + for (i = 0; i < IEEE80211_NUM_ACS; i++) + ieee80211_wake_queue(sc->hw, + sc->cur_chan->hw_queue_base + i); + } + + if (ah->opmode == NL80211_IFTYPE_AP) + ieee80211_wake_queue(sc->hw, sc->hw->queues - 2); } + +/*****************/ +/* P2P Powersave */ +/*****************/ + +static void ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp) +{ + struct ath_hw *ah = sc->sc_ah; + s32 tsf, target_tsf; + + if (!avp || !avp->noa.has_next_tsf) + return; + + ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer); + + tsf = ath9k_hw_gettsf32(sc->sc_ah); + + target_tsf = avp->noa.next_tsf; + if (!avp->noa.absent) + target_tsf -= ATH_P2P_PS_STOP_TIME; + + if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME) + target_tsf = tsf + ATH_P2P_PS_STOP_TIME; + + ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000); +} + +static void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif) +{ + struct ath_vif *avp = (void *)vif->drv_priv; + u32 tsf; + + if (!sc->p2p_ps_timer) + return; + + if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p) + return; + + sc->p2p_ps_vif = avp; + tsf = ath9k_hw_gettsf32(sc->sc_ah); + ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf); + ath9k_update_p2p_ps_timer(sc, avp); +} + +void ath9k_beacon_add_noa(struct ath_softc *sc, struct ath_vif *avp, + struct sk_buff *skb) +{ + static const u8 noa_ie_hdr[] = { + WLAN_EID_VENDOR_SPECIFIC, /* type */ + 0, /* length */ + 0x50, 0x6f, 0x9a, /* WFA OUI */ + 0x09, /* P2P subtype */ + 0x0c, /* Notice of Absence */ + 0x00, /* LSB of little-endian len */ + 0x00, /* MSB of little-endian len */ + }; + + struct ieee80211_p2p_noa_attr *noa; + int noa_len, noa_desc, i = 0; + u8 *hdr; + + if (!avp->offchannel_duration && !avp->periodic_noa_duration) + return; + + noa_desc = !!avp->offchannel_duration + !!avp->periodic_noa_duration; + noa_len = 2 + sizeof(struct ieee80211_p2p_noa_desc) * noa_desc; + + hdr = skb_put(skb, sizeof(noa_ie_hdr)); + memcpy(hdr, noa_ie_hdr, sizeof(noa_ie_hdr)); + hdr[1] = sizeof(noa_ie_hdr) + noa_len - 2; + hdr[7] = noa_len; + + noa = (void *) skb_put(skb, noa_len); + memset(noa, 0, noa_len); + + noa->index = avp->noa_index; + if (avp->periodic_noa_duration) { + u32 interval = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval); + + noa->desc[i].count = 255; + noa->desc[i].start_time = cpu_to_le32(avp->periodic_noa_start); + noa->desc[i].duration = cpu_to_le32(avp->periodic_noa_duration); + noa->desc[i].interval = cpu_to_le32(interval); + i++; + } + + if (avp->offchannel_duration) { + noa->desc[i].count = 1; + noa->desc[i].start_time = cpu_to_le32(avp->offchannel_start); + noa->desc[i].duration = cpu_to_le32(avp->offchannel_duration); + } +} + +void ath9k_p2p_ps_timer(void *priv) +{ + struct ath_softc *sc = priv; + struct ath_vif *avp = sc->p2p_ps_vif; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + struct ath_node *an; + u32 tsf; + + del_timer_sync(&sc->sched.timer); + ath9k_hw_gen_timer_stop(sc->sc_ah, sc->p2p_ps_timer); + ath_chanctx_event(sc, NULL, 
ATH_CHANCTX_EVENT_TSF_TIMER); + + if (!avp || avp->chanctx != sc->cur_chan) + return; + + tsf = ath9k_hw_gettsf32(sc->sc_ah); + if (!avp->noa.absent) + tsf += ATH_P2P_PS_STOP_TIME; + + if (!avp->noa.has_next_tsf || + avp->noa.next_tsf - tsf > BIT(31)) + ieee80211_update_p2p_noa(&avp->noa, tsf); + + ath9k_update_p2p_ps_timer(sc, avp); + + rcu_read_lock(); + + vif = avp->vif; + sta = ieee80211_find_sta(vif, vif->bss_conf.bssid); + if (!sta) + goto out; + + an = (void *) sta->drv_priv; + if (an->sleeping == !!avp->noa.absent) + goto out; + + an->sleeping = avp->noa.absent; + if (an->sleeping) + ath_tx_aggr_sleep(sta, sc, an); + else + ath_tx_aggr_wakeup(sc, an); + +out: + rcu_read_unlock(); +} + +void ath9k_p2p_bss_info_changed(struct ath_softc *sc, + struct ieee80211_vif *vif) +{ + unsigned long flags; + + spin_lock_bh(&sc->sc_pcu_lock); + spin_lock_irqsave(&sc->sc_pm_lock, flags); + if (!(sc->ps_flags & PS_BEACON_SYNC)) + ath9k_update_p2p_ps(sc, vif); + spin_unlock_irqrestore(&sc->sc_pm_lock, flags); + spin_unlock_bh(&sc->sc_pcu_lock); +} + +void ath9k_p2p_beacon_sync(struct ath_softc *sc) +{ + if (sc->p2p_ps_vif) + ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif); +} + +void ath9k_p2p_remove_vif(struct ath_softc *sc, + struct ieee80211_vif *vif) +{ + struct ath_vif *avp = (void *)vif->drv_priv; + + spin_lock_bh(&sc->sc_pcu_lock); + if (avp == sc->p2p_ps_vif) { + sc->p2p_ps_vif = NULL; + ath9k_update_p2p_ps_timer(sc, NULL); + } + spin_unlock_bh(&sc->sc_pcu_lock); +} + +int ath9k_init_p2p(struct ath_softc *sc) +{ + sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer, + NULL, sc, AR_FIRST_NDP_TIMER); + if (!sc->p2p_ps_timer) + return -ENOMEM; + + return 0; +} + +void ath9k_deinit_p2p(struct ath_softc *sc) +{ + if (sc->p2p_ps_timer) + ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer); +} + +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 8a3bd5fe3a54..d779f4fa50e3 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -592,6 +592,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + hw->queues = 4; hw->max_listen_interval = 1; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 39419ea845cc..ca10a8b3a381 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -61,10 +61,14 @@ static int ath9k_ps_enable; module_param_named(ps_enable, ath9k_ps_enable, int, 0444); MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave"); +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + int ath9k_use_chanctx; module_param_named(use_chanctx, ath9k_use_chanctx, int, 0444); MODULE_PARM_DESC(use_chanctx, "Enable channel context for concurrency"); +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ + bool is_ath9k_unloaded; #ifdef CONFIG_MAC80211_LEDS @@ -511,7 +515,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, sc->tx99_power = MAX_RATE_POWER + 1; init_waitqueue_head(&sc->tx_wait); sc->cur_chan = &sc->chanctx[0]; - if (!ath9k_use_chanctx) + if (!ath9k_is_chanctx_enabled()) sc->cur_chan->hw_queue_base = 0; if (!pdata || pdata->use_eeprom) { @@ -567,11 +571,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, setup_timer(&sc->sleep_timer, ath_ps_full_sleep, (unsigned long)sc); INIT_WORK(&sc->hw_reset_work, 
ath_reset_work); INIT_WORK(&sc->paprd_work, ath_paprd_calibrate); - INIT_WORK(&sc->chanctx_work, ath_chanctx_work); INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work); - setup_timer(&sc->offchannel.timer, ath_offchannel_timer, - (unsigned long)sc); - setup_timer(&sc->sched.timer, ath_chanctx_timer, (unsigned long)sc); + + ath9k_init_channel_context(sc); /* * Cache line size is used to size and align various @@ -600,13 +602,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, if (ret) goto err_btcoex; - sc->p2p_ps_timer = ath_gen_timer_alloc(sc->sc_ah, ath9k_p2p_ps_timer, - NULL, sc, AR_FIRST_NDP_TIMER); + ret = ath9k_init_p2p(sc); + if (ret) + goto err_btcoex; ath9k_cmn_init_crypto(sc->sc_ah); ath9k_init_misc(sc); ath_fill_led_pin(sc); ath_chanctx_init(sc); + ath9k_offchannel_init(sc); if (common->bus_ops->aspm_init) common->bus_ops->aspm_init(common); @@ -672,18 +676,14 @@ static const struct ieee80211_iface_limit wds_limits[] = { { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) }, }; +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + static const struct ieee80211_iface_limit if_limits_multi[] = { - { .max = 1, .types = BIT(NL80211_IFTYPE_STATION) }, - { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | + { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, -}; - -static const struct ieee80211_iface_limit if_dfs_limits[] = { - { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_ADHOC) }, + { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, }; static const struct ieee80211_iface_combination if_comb_multi[] = { @@ -696,6 +696,16 @@ static const struct ieee80211_iface_combination if_comb_multi[] = { }, }; +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ + +static const struct ieee80211_iface_limit if_dfs_limits[] = { + { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_ADHOC) }, +}; + static const struct ieee80211_iface_combination if_comb[] = { { .limits = if_limits, @@ -763,24 +773,31 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_MESH_POINT); - if (!ath9k_use_chanctx) { + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_WDS); + hw->wiphy->iface_combinations = if_comb; hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); - hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_WDS); - } else { - hw->wiphy->iface_combinations = if_comb_multi; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(if_comb_multi); - hw->wiphy->max_scan_ssids = 255; - hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; - hw->wiphy->max_remain_on_channel_duration = 10000; - hw->chanctx_data_size = sizeof(void *); - hw->extra_beacon_tailroom = - sizeof(struct ieee80211_p2p_noa_attr) + 9; - } } +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT + + if (ath9k_is_chanctx_enabled()) { + hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS); + hw->wiphy->iface_combinations = if_comb_multi; + hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi); + hw->wiphy->max_scan_ssids = 255; + hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; + hw->wiphy->max_remain_on_channel_duration = 10000; + hw->chanctx_data_size = sizeof(void *); + hw->extra_beacon_tailroom = + sizeof(struct ieee80211_p2p_noa_attr) + 9; + + ath_dbg(common, CHAN_CTX, 
"Use channel contexts\n"); + } + +#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -915,9 +932,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc) { int i = 0; - if (sc->p2p_ps_timer) - ath_gen_timer_free(sc->sc_ah, sc->p2p_ps_timer); - + ath9k_deinit_p2p(sc); ath9k_deinit_btcoex(sc); for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index e6ac8d2e610c..d9be83158a82 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -223,7 +223,6 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); unsigned long flags; - int i; if (ath_startrecv(sc) != 0) { ath_err(common, "Unable to restart recv logic\n"); @@ -268,20 +267,10 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) ath9k_hw_set_interrupts(ah); ath9k_hw_enable_interrupts(ah); - if (!ath9k_use_chanctx) + if (!ath9k_is_chanctx_enabled()) ieee80211_wake_queues(sc->hw); - else { - if (sc->cur_chan == &sc->offchannel.chan) - ieee80211_wake_queue(sc->hw, - sc->hw->offchannel_tx_hw_queue); - else { - for (i = 0; i < IEEE80211_NUM_ACS; i++) - ieee80211_wake_queue(sc->hw, - sc->cur_chan->hw_queue_base + i); - } - if (ah->opmode == NL80211_IFTYPE_AP) - ieee80211_wake_queue(sc->hw, sc->hw->queues - 2); - } + else + ath9k_chanctx_wake_queues(sc); ath9k_p2p_ps_timer(sc); @@ -314,6 +303,9 @@ int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) if (!ath_prepare_reset(sc)) fastcc = false; + if (ath9k_is_chanctx_enabled()) + fastcc = false; + spin_lock_bh(&sc->chan_lock); sc->cur_chandef = sc->cur_chan->chandef; spin_unlock_bh(&sc->chan_lock); @@ -822,7 +814,8 @@ static void ath9k_stop(struct ieee80211_hw *hw) struct ath_common *common = ath9k_hw_common(ah); bool prev_idle; - cancel_work_sync(&sc->chanctx_work); + ath9k_deinit_channel_context(sc); + mutex_lock(&sc->mutex); ath_cancel_work(sc); @@ -903,9 +896,9 @@ static bool ath9k_uses_beacons(int type) } } -static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data, + u8 *mac, struct ieee80211_vif *vif) { - struct ath9k_vif_iter_data *iter_data = data; int i; if (iter_data->has_hw_macaddr) { @@ -968,6 +961,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, list_for_each_entry(avp, &ctx->vifs, list) ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif); +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT if (ctx == &sc->offchannel.chan) { struct ieee80211_vif *vif; @@ -980,6 +974,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, ath9k_vif_iter(iter_data, vif->addr, vif); iter_data->beacons = false; } +#endif } static void ath9k_set_assoc_state(struct ath_softc *sc, @@ -1139,7 +1134,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, ath9k_beacon_assign_slot(sc, vif); avp->vif = vif; - if (!ath9k_use_chanctx) { + if (!ath9k_is_chanctx_enabled()) { avp->chanctx = sc->cur_chan; list_add_tail(&avp->list, &avp->chanctx->vifs); } @@ -1202,29 +1197,6 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, return 0; } -static void -ath9k_update_p2p_ps_timer(struct ath_softc *sc, struct ath_vif *avp) -{ - struct ath_hw *ah = sc->sc_ah; - s32 tsf, target_tsf; - - if (!avp || !avp->noa.has_next_tsf) - return; - - ath9k_hw_gen_timer_stop(ah, sc->p2p_ps_timer); - - tsf = 
ath9k_hw_gettsf32(sc->sc_ah); - - target_tsf = avp->noa.next_tsf; - if (!avp->noa.absent) - target_tsf -= ATH_P2P_PS_STOP_TIME; - - if (target_tsf - tsf < ATH_P2P_PS_STOP_TIME) - target_tsf = tsf + ATH_P2P_PS_STOP_TIME; - - ath9k_hw_gen_timer_start(ah, sc->p2p_ps_timer, (u32) target_tsf, 1000000); -} - static void ath9k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { @@ -1236,16 +1208,11 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); - spin_lock_bh(&sc->sc_pcu_lock); - if (avp == sc->p2p_ps_vif) { - sc->p2p_ps_vif = NULL; - ath9k_update_p2p_ps_timer(sc, NULL); - } - spin_unlock_bh(&sc->sc_pcu_lock); + ath9k_p2p_remove_vif(sc, vif); sc->nvifs--; sc->tx99_vif = NULL; - if (!ath9k_use_chanctx) + if (!ath9k_is_chanctx_enabled()) list_del(&avp->list); if (ath9k_uses_beacons(vif->type)) @@ -1423,7 +1390,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) } } - if (!ath9k_use_chanctx && (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { + if (!ath9k_is_chanctx_enabled() && (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { ctx->offchannel = !!(conf->flags & IEEE80211_CONF_OFFCHANNEL); ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef); } @@ -1687,70 +1654,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw, return ret; } -void ath9k_p2p_ps_timer(void *priv) -{ - struct ath_softc *sc = priv; - struct ath_vif *avp = sc->p2p_ps_vif; - struct ieee80211_vif *vif; - struct ieee80211_sta *sta; - struct ath_node *an; - u32 tsf; - - del_timer_sync(&sc->sched.timer); - ath9k_hw_gen_timer_stop(sc->sc_ah, sc->p2p_ps_timer); - ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_TSF_TIMER); - - if (!avp || avp->chanctx != sc->cur_chan) - return; - - tsf = ath9k_hw_gettsf32(sc->sc_ah); - if (!avp->noa.absent) - tsf += ATH_P2P_PS_STOP_TIME; - - if (!avp->noa.has_next_tsf || - avp->noa.next_tsf - tsf > BIT(31)) - ieee80211_update_p2p_noa(&avp->noa, tsf); - - ath9k_update_p2p_ps_timer(sc, avp); - - rcu_read_lock(); - - vif = avp->vif; - sta = ieee80211_find_sta(vif, vif->bss_conf.bssid); - if (!sta) - goto out; - - an = (void *) sta->drv_priv; - if (an->sleeping == !!avp->noa.absent) - goto out; - - an->sleeping = avp->noa.absent; - if (an->sleeping) - ath_tx_aggr_sleep(sta, sc, an); - else - ath_tx_aggr_wakeup(sc, an); - -out: - rcu_read_unlock(); -} - -void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif) -{ - struct ath_vif *avp = (void *)vif->drv_priv; - u32 tsf; - - if (!sc->p2p_ps_timer) - return; - - if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p) - return; - - sc->p2p_ps_vif = avp; - tsf = ath9k_hw_gettsf32(sc->sc_ah); - ieee80211_parse_p2p_noa(&vif->bss_conf.p2p_noa_attr, &avp->noa, tsf); - ath9k_update_p2p_ps_timer(sc, avp); -} - static void ath9k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1765,7 +1668,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath_vif *avp = (void *)vif->drv_priv; - unsigned long flags; int slottime; ath9k_ps_wakeup(sc); @@ -1776,8 +1678,12 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, bss_conf->bssid, bss_conf->assoc); ath9k_calculate_summary_state(sc, avp->chanctx); - if (bss_conf->assoc) - ath_chanctx_event(sc, vif, ATH_CHANCTX_EVENT_ASSOC); + + if (ath9k_is_chanctx_enabled()) { + if (bss_conf->assoc) + ath_chanctx_event(sc, vif, + ATH_CHANCTX_EVENT_ASSOC); + } } if (changed & BSS_CHANGED_IBSS) { @@ 
-1814,14 +1720,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, } } - if (changed & BSS_CHANGED_P2P_PS) { - spin_lock_bh(&sc->sc_pcu_lock); - spin_lock_irqsave(&sc->sc_pm_lock, flags); - if (!(sc->ps_flags & PS_BEACON_SYNC)) - ath9k_update_p2p_ps(sc, vif); - spin_unlock_irqrestore(&sc->sc_pm_lock, flags); - spin_unlock_bh(&sc->sc_pcu_lock); - } + if (changed & BSS_CHANGED_P2P_PS) + ath9k_p2p_bss_info_changed(sc, vif); if (changed & CHECK_ANI) ath_check_ani(sc); @@ -2207,207 +2107,7 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) clear_bit(ATH_OP_SCANNING, &common->op_flags); } -static int ath_scan_channel_duration(struct ath_softc *sc, - struct ieee80211_channel *chan) -{ - struct cfg80211_scan_request *req = sc->offchannel.scan_req; - - if (!req->n_ssids || (chan->flags & IEEE80211_CHAN_NO_IR)) - return (HZ / 9); /* ~110 ms */ - - return (HZ / 16); /* ~60 ms */ -} - -static void -ath_scan_next_channel(struct ath_softc *sc) -{ - struct cfg80211_scan_request *req = sc->offchannel.scan_req; - struct ieee80211_channel *chan; - - if (sc->offchannel.scan_idx >= req->n_channels) { - sc->offchannel.state = ATH_OFFCHANNEL_IDLE; - ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false), - NULL); - return; - } - - chan = req->channels[sc->offchannel.scan_idx++]; - sc->offchannel.duration = ath_scan_channel_duration(sc, chan); - sc->offchannel.state = ATH_OFFCHANNEL_PROBE_SEND; - ath_chanctx_offchan_switch(sc, chan); -} - -static void ath_offchannel_next(struct ath_softc *sc) -{ - struct ieee80211_vif *vif; - - if (sc->offchannel.scan_req) { - vif = sc->offchannel.scan_vif; - sc->offchannel.chan.txpower = vif->bss_conf.txpower; - ath_scan_next_channel(sc); - } else if (sc->offchannel.roc_vif) { - vif = sc->offchannel.roc_vif; - sc->offchannel.chan.txpower = vif->bss_conf.txpower; - sc->offchannel.duration = sc->offchannel.roc_duration; - sc->offchannel.state = ATH_OFFCHANNEL_ROC_START; - ath_chanctx_offchan_switch(sc, sc->offchannel.roc_chan); - } else { - ath_chanctx_switch(sc, ath_chanctx_get_oper_chan(sc, false), - NULL); - sc->offchannel.state = ATH_OFFCHANNEL_IDLE; - if (sc->ps_idle) - ath_cancel_work(sc); - } -} - -static void ath_roc_complete(struct ath_softc *sc, bool abort) -{ - sc->offchannel.roc_vif = NULL; - sc->offchannel.roc_chan = NULL; - if (!abort) - ieee80211_remain_on_channel_expired(sc->hw); - ath_offchannel_next(sc); - ath9k_ps_restore(sc); -} - -static void ath_scan_complete(struct ath_softc *sc, bool abort) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - - sc->offchannel.scan_req = NULL; - sc->offchannel.scan_vif = NULL; - sc->offchannel.state = ATH_OFFCHANNEL_IDLE; - ieee80211_scan_completed(sc->hw, abort); - clear_bit(ATH_OP_SCANNING, &common->op_flags); - ath_offchannel_next(sc); - ath9k_ps_restore(sc); -} - -static void ath_scan_send_probe(struct ath_softc *sc, - struct cfg80211_ssid *ssid) -{ - struct cfg80211_scan_request *req = sc->offchannel.scan_req; - struct ieee80211_vif *vif = sc->offchannel.scan_vif; - struct ath_tx_control txctl = {}; - struct sk_buff *skb; - struct ieee80211_tx_info *info; - int band = sc->offchannel.chan.chandef.chan->band; - - skb = ieee80211_probereq_get(sc->hw, vif, - ssid->ssid, ssid->ssid_len, req->ie_len); - if (!skb) - return; - - info = IEEE80211_SKB_CB(skb); - if (req->no_cck) - info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE; - - if (req->ie_len) - memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len); - - skb_set_queue_mapping(skb, IEEE80211_AC_VO); - - if 
(!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL)) - goto error; - - txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; - txctl.force_channel = true; - if (ath_tx_start(sc->hw, skb, &txctl)) - goto error; - - return; - -error: - ieee80211_free_txskb(sc->hw, skb); -} - -static void ath_scan_channel_start(struct ath_softc *sc) -{ - struct cfg80211_scan_request *req = sc->offchannel.scan_req; - int i; - - if (!(sc->cur_chan->chandef.chan->flags & IEEE80211_CHAN_NO_IR) && - req->n_ssids) { - for (i = 0; i < req->n_ssids; i++) - ath_scan_send_probe(sc, &req->ssids[i]); - - } - - sc->offchannel.state = ATH_OFFCHANNEL_PROBE_WAIT; - mod_timer(&sc->offchannel.timer, jiffies + sc->offchannel.duration); -} - -void ath_offchannel_channel_change(struct ath_softc *sc) -{ - switch (sc->offchannel.state) { - case ATH_OFFCHANNEL_PROBE_SEND: - if (!sc->offchannel.scan_req) - return; - - if (sc->cur_chan->chandef.chan != - sc->offchannel.chan.chandef.chan) - return; - - ath_scan_channel_start(sc); - break; - case ATH_OFFCHANNEL_IDLE: - if (!sc->offchannel.scan_req) - return; - - ath_scan_complete(sc, false); - break; - case ATH_OFFCHANNEL_ROC_START: - if (sc->cur_chan != &sc->offchannel.chan) - break; - - sc->offchannel.state = ATH_OFFCHANNEL_ROC_WAIT; - mod_timer(&sc->offchannel.timer, jiffies + - msecs_to_jiffies(sc->offchannel.duration)); - ieee80211_ready_on_channel(sc->hw); - break; - case ATH_OFFCHANNEL_ROC_DONE: - ath_roc_complete(sc, false); - break; - default: - break; - } -} - -void ath_offchannel_timer(unsigned long data) -{ - struct ath_softc *sc = (struct ath_softc *)data; - struct ath_chanctx *ctx; - - switch (sc->offchannel.state) { - case ATH_OFFCHANNEL_PROBE_WAIT: - if (!sc->offchannel.scan_req) - return; - - /* get first active channel context */ - ctx = ath_chanctx_get_oper_chan(sc, true); - if (ctx->active) { - sc->offchannel.state = ATH_OFFCHANNEL_SUSPEND; - ath_chanctx_switch(sc, ctx, NULL); - mod_timer(&sc->offchannel.timer, jiffies + HZ / 10); - break; - } - /* fall through */ - case ATH_OFFCHANNEL_SUSPEND: - if (!sc->offchannel.scan_req) - return; - - ath_scan_next_channel(sc); - break; - case ATH_OFFCHANNEL_ROC_START: - case ATH_OFFCHANNEL_ROC_WAIT: - ctx = ath_chanctx_get_oper_chan(sc, false); - sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE; - ath_chanctx_switch(sc, ctx, NULL); - break; - default: - break; - } -} +#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) @@ -2430,8 +2130,13 @@ static int ath9k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, sc->offchannel.scan_req = req; sc->offchannel.scan_idx = 0; - if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) + ath_dbg(common, CHAN_CTX, "HW scan request received on vif: %pM\n", + vif->addr); + + if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) { + ath_dbg(common, CHAN_CTX, "Starting HW scan\n"); ath_offchannel_next(sc); + } out: mutex_unlock(&sc->mutex); @@ -2443,6 +2148,9 @@ static void ath9k_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + + ath_dbg(common, CHAN_CTX, "Cancel HW scan on vif: %pM\n", vif->addr); mutex_lock(&sc->mutex); del_timer_sync(&sc->offchannel.timer); @@ -2456,6 +2164,7 @@ static int ath9k_remain_on_channel(struct ieee80211_hw *hw, enum ieee80211_roc_type type) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); int ret = 0; 
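+	/* Note: the RoC request below is only recorded here; the actual channel switch is driven by the offchannel state machine. ath_offchannel_next() starts it right away only when the state is ATH_OFFCHANNEL_IDLE, otherwise the pending request is picked up once the current scan or RoC period completes. */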
mutex_lock(&sc->mutex); @@ -2470,8 +2179,14 @@ static int ath9k_remain_on_channel(struct ieee80211_hw *hw, sc->offchannel.roc_chan = chan; sc->offchannel.roc_duration = duration; - if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) + ath_dbg(common, CHAN_CTX, + "RoC request on vif: %pM, type: %d duration: %d\n", + vif->addr, type, duration); + + if (sc->offchannel.state == ATH_OFFCHANNEL_IDLE) { + ath_dbg(common, CHAN_CTX, "Starting RoC period\n"); ath_offchannel_next(sc); + } out: mutex_unlock(&sc->mutex); @@ -2482,9 +2197,11 @@ out: static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); mutex_lock(&sc->mutex); + ath_dbg(common, CHAN_CTX, "Cancel RoC\n"); del_timer_sync(&sc->offchannel.timer); if (sc->offchannel.roc_vif) { @@ -2501,6 +2218,7 @@ static int ath9k_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_chanctx *ctx, **ptr; int pos; @@ -2515,10 +2233,18 @@ static int ath9k_add_chanctx(struct ieee80211_hw *hw, ctx->assigned = true; pos = ctx - &sc->chanctx[0]; ctx->hw_queue_base = pos * IEEE80211_NUM_ACS; + + ath_dbg(common, CHAN_CTX, + "Add channel context: %d MHz\n", + conf->def.chan->center_freq); + ath_chanctx_set_channel(sc, ctx, &conf->def); + ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_ASSIGN); + mutex_unlock(&sc->mutex); return 0; } + mutex_unlock(&sc->mutex); return -ENOSPC; } @@ -2528,12 +2254,19 @@ static void ath9k_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_chanctx *ctx = ath_chanctx_get(conf); mutex_lock(&sc->mutex); + + ath_dbg(common, CHAN_CTX, + "Remove channel context: %d MHz\n", + conf->def.chan->center_freq); + ctx->assigned = false; ctx->hw_queue_base = -1; ath_chanctx_event(sc, NULL, ATH_CHANCTX_EVENT_UNASSIGN); + mutex_unlock(&sc->mutex); } @@ -2542,9 +2275,13 @@ static void ath9k_change_chanctx(struct ieee80211_hw *hw, u32 changed) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_chanctx *ctx = ath_chanctx_get(conf); mutex_lock(&sc->mutex); + ath_dbg(common, CHAN_CTX, + "Change channel context: %d MHz\n", + conf->def.chan->center_freq); ath_chanctx_set_channel(sc, ctx, &conf->def); mutex_unlock(&sc->mutex); } @@ -2554,16 +2291,24 @@ static int ath9k_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (void *)vif->drv_priv; struct ath_chanctx *ctx = ath_chanctx_get(conf); int i; mutex_lock(&sc->mutex); + + ath_dbg(common, CHAN_CTX, + "Assign VIF (addr: %pM, type: %d, p2p: %d) to channel context: %d MHz\n", + vif->addr, vif->type, vif->p2p, + conf->def.chan->center_freq); + avp->chanctx = ctx; list_add_tail(&avp->list, &ctx->vifs); ath9k_calculate_summary_state(sc, ctx); for (i = 0; i < IEEE80211_NUM_ACS; i++) vif->hw_queue[i] = ctx->hw_queue_base + i; + mutex_unlock(&sc->mutex); return 0; @@ -2574,36 +2319,79 @@ static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf) { struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (void *)vif->drv_priv; struct ath_chanctx *ctx = ath_chanctx_get(conf); int ac; 
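+	/* Note: unassigning detaches the vif from the context's vif list, recomputes the summary state for the vifs that remain and invalidates the vif's hardware queue mapping via IEEE80211_INVAL_HW_QUEUE. */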
mutex_lock(&sc->mutex); + + ath_dbg(common, CHAN_CTX, + "Remove VIF (addr: %pM, type: %d, p2p: %d) from channel context: %d MHz\n", + vif->addr, vif->type, vif->p2p, + conf->def.chan->center_freq); + avp->chanctx = NULL; list_del(&avp->list); ath9k_calculate_summary_state(sc, ctx); for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE; + + mutex_unlock(&sc->mutex); +} + +static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath_softc *sc = hw->priv; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_vif *avp = (struct ath_vif *) vif->drv_priv; + bool changed = false; + + if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) + return; + + if (!avp->chanctx) + return; + + mutex_lock(&sc->mutex); + + spin_lock_bh(&sc->chan_lock); + if (sc->next_chan || (sc->cur_chan != avp->chanctx)) { + sc->next_chan = avp->chanctx; + changed = true; + } + ath_dbg(common, CHAN_CTX, + "%s: Set chanctx state to FORCE_ACTIVE, changed: %d\n", + __func__, changed); + sc->sched.state = ATH_CHANCTX_STATE_FORCE_ACTIVE; + spin_unlock_bh(&sc->chan_lock); + + if (changed) + ath_chanctx_set_next(sc, true); + mutex_unlock(&sc->mutex); } void ath9k_fill_chanctx_ops(void) { - if (!ath9k_use_chanctx) + if (!ath9k_is_chanctx_enabled()) return; - ath9k_ops.hw_scan = ath9k_hw_scan; - ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan; - ath9k_ops.remain_on_channel = ath9k_remain_on_channel; + ath9k_ops.hw_scan = ath9k_hw_scan; + ath9k_ops.cancel_hw_scan = ath9k_cancel_hw_scan; + ath9k_ops.remain_on_channel = ath9k_remain_on_channel; ath9k_ops.cancel_remain_on_channel = ath9k_cancel_remain_on_channel; - ath9k_ops.add_chanctx = ath9k_add_chanctx; - ath9k_ops.remove_chanctx = ath9k_remove_chanctx; - ath9k_ops.change_chanctx = ath9k_change_chanctx; - ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx; - ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx; - ath9k_ops.mgd_prepare_tx = ath9k_chanctx_force_active; + ath9k_ops.add_chanctx = ath9k_add_chanctx; + ath9k_ops.remove_chanctx = ath9k_remove_chanctx; + ath9k_ops.change_chanctx = ath9k_change_chanctx; + ath9k_ops.assign_vif_chanctx = ath9k_assign_vif_chanctx; + ath9k_ops.unassign_vif_chanctx = ath9k_unassign_vif_chanctx; + ath9k_ops.mgd_prepare_tx = ath9k_mgd_prepare_tx; } +#endif + struct ieee80211_ops ath9k_ops = { .tx = ath9k_tx, .start = ath9k_start, diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 74ab1d02013b..2aaf233ee5d6 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -425,7 +425,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah)) rfilt |= ATH9K_RX_FILTER_4ADDRESS; - if (ath9k_use_chanctx && + if (ath9k_is_chanctx_enabled() && test_bit(ATH_OP_SCANNING, &common->op_flags)) rfilt |= ATH9K_RX_FILTER_BEACON; @@ -547,8 +547,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) "Reconfigure beacon timers based on synchronized timestamp\n"); if (!(WARN_ON_ONCE(sc->cur_chan->beacon.beacon_interval == 0))) ath9k_set_beacon(sc); - if (sc->p2p_ps_vif) - ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif); + + ath9k_p2p_beacon_sync(sc); } if (ath_beacon_dtim_pending_cab(skb)) { @@ -892,9 +892,10 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, return -EINVAL; } - if (rx_stats->is_mybeacon) { - sc->sched.next_tbtt = rx_stats->rs_tstamp; - ath_chanctx_event(sc, NULL, 
ATH_CHANCTX_EVENT_BEACON_RECEIVED); + if (ath9k_is_chanctx_enabled()) { + if (rx_stats->is_mybeacon) + ath_chanctx_beacon_recv_ev(sc, rx_stats->rs_tstamp, + ATH_CHANCTX_EVENT_BEACON_RECEIVED); } ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status); diff --git a/drivers/net/wireless/ath/ath9k/spectral.h b/drivers/net/wireless/ath/ath9k/spectral.h index ead63412ee1a..7b410c6858b0 100644 --- a/drivers/net/wireless/ath/ath9k/spectral.h +++ b/drivers/net/wireless/ath/ath9k/spectral.h @@ -17,6 +17,8 @@ #ifndef SPECTRAL_H #define SPECTRAL_H +#include "../spectral_common.h" + /* enum spectral_mode: * * @SPECTRAL_DISABLED: spectral mode is disabled @@ -54,8 +56,6 @@ struct ath_ht20_mag_info { u8 max_exp; } __packed; -#define SPECTRAL_HT20_NUM_BINS 56 - /* WARNING: don't actually use this struct! MAC may vary the amount of * data by -1/+2. This struct is for reference only. */ @@ -83,8 +83,6 @@ struct ath_ht20_40_mag_info { u8 max_exp; } __packed; -#define SPECTRAL_HT20_40_NUM_BINS 128 - /* WARNING: don't actually use this struct! MAC may vary the amount of * data. This struct is for reference only. */ @@ -125,71 +123,6 @@ static inline u8 spectral_bitmap_weight(u8 *bins) return bins[0] & 0x3f; } -/* FFT sample format given to userspace via debugfs. - * - * Please keep the type/length at the front position and change - * other fields after adding another sample type - * - * TODO: this might need rework when switching to nl80211-based - * interface. - */ -enum ath_fft_sample_type { - ATH_FFT_SAMPLE_HT20 = 1, - ATH_FFT_SAMPLE_HT20_40, -}; - -struct fft_sample_tlv { - u8 type; /* see ath_fft_sample */ - __be16 length; - /* type dependent data follows */ -} __packed; - -struct fft_sample_ht20 { - struct fft_sample_tlv tlv; - - u8 max_exp; - - __be16 freq; - s8 rssi; - s8 noise; - - __be16 max_magnitude; - u8 max_index; - u8 bitmap_weight; - - __be64 tsf; - - u8 data[SPECTRAL_HT20_NUM_BINS]; -} __packed; - -struct fft_sample_ht20_40 { - struct fft_sample_tlv tlv; - - u8 channel_type; - __be16 freq; - - s8 lower_rssi; - s8 upper_rssi; - - __be64 tsf; - - s8 lower_noise; - s8 upper_noise; - - __be16 lower_max_magnitude; - __be16 upper_max_magnitude; - - u8 lower_max_index; - u8 upper_max_index; - - u8 lower_bitmap_weight; - u8 upper_bitmap_weight; - - u8 max_exp; - - u8 data[SPECTRAL_HT20_40_NUM_BINS]; -} __packed; - void ath9k_spectral_init_debug(struct ath_softc *sc); void ath9k_spectral_deinit_debug(struct ath_softc *sc); diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c index a4f4f0da81f6..33531d9a4d50 100644 --- a/drivers/net/wireless/ath/ath9k/wow.c +++ b/drivers/net/wireless/ath/ath9k/wow.c @@ -193,7 +193,8 @@ int ath9k_suspend(struct ieee80211_hw *hw, u32 wow_triggers_enabled = 0; int ret = 0; - cancel_work_sync(&sc->chanctx_work); + ath9k_deinit_channel_context(sc); + mutex_lock(&sc->mutex); ath_cancel_work(sc); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 704fcbcbe20b..281986613fb2 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2632,8 +2632,11 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) sc->beacon.tx_processed = true; sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK); - ath_chanctx_event(sc, NULL, - ATH_CHANCTX_EVENT_BEACON_SENT); + if (ath9k_is_chanctx_enabled()) { + ath_chanctx_event(sc, NULL, + ATH_CHANCTX_EVENT_BEACON_SENT); + } + ath9k_csa_update(sc); continue; } diff --git a/drivers/net/wireless/ath/carl9170/main.c 
b/drivers/net/wireless/ath/carl9170/main.c index f8ded84b7be8..ef5b6dc7b7f1 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1430,18 +1430,10 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, if (!sta_info->ht_sta) return -EOPNOTSUPP; - rcu_read_lock(); - if (rcu_dereference(sta_info->agg[tid])) { - rcu_read_unlock(); - return -EBUSY; - } - tid_info = kzalloc(sizeof(struct carl9170_sta_tid), GFP_ATOMIC); - if (!tid_info) { - rcu_read_unlock(); + if (!tid_info) return -ENOMEM; - } tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn); tid_info->state = CARL9170_TID_STATE_PROGRESS; @@ -1460,7 +1452,6 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list); rcu_assign_pointer(sta_info->agg[tid], tid_info); spin_unlock_bh(&ar->tx_ampdu_list_lock); - rcu_read_unlock(); ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 4cadfd48ffdf..ae86a600d920 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -1557,7 +1557,7 @@ static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar) } out: - rcu_assign_pointer(ar->beacon_iter, cvif); + RCU_INIT_POINTER(ar->beacon_iter, cvif); return cvif; } diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h new file mode 100644 index 000000000000..0d742acb1599 --- /dev/null +++ b/drivers/net/wireless/ath/spectral_common.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2013 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef SPECTRAL_COMMON_H +#define SPECTRAL_COMMON_H + +#define SPECTRAL_HT20_NUM_BINS 56 +#define SPECTRAL_HT20_40_NUM_BINS 128 + +/* TODO: could possibly be 512, but no samples this large + * could be acquired so far. + */ +#define SPECTRAL_ATH10K_MAX_NUM_BINS 256 + +/* FFT sample format given to userspace via debugfs. + * + * Please keep the type/length at the front position and change + * other fields after adding another sample type + * + * TODO: this might need rework when switching to nl80211-based + * interface. 
+ */ +enum ath_fft_sample_type { + ATH_FFT_SAMPLE_HT20 = 1, + ATH_FFT_SAMPLE_HT20_40, + ATH_FFT_SAMPLE_ATH10K, +}; + +struct fft_sample_tlv { + u8 type; /* see ath_fft_sample */ + __be16 length; + /* type dependent data follows */ +} __packed; + +struct fft_sample_ht20 { + struct fft_sample_tlv tlv; + + u8 max_exp; + + __be16 freq; + s8 rssi; + s8 noise; + + __be16 max_magnitude; + u8 max_index; + u8 bitmap_weight; + + __be64 tsf; + + u8 data[SPECTRAL_HT20_NUM_BINS]; +} __packed; + +struct fft_sample_ht20_40 { + struct fft_sample_tlv tlv; + + u8 channel_type; + __be16 freq; + + s8 lower_rssi; + s8 upper_rssi; + + __be64 tsf; + + s8 lower_noise; + s8 upper_noise; + + __be16 lower_max_magnitude; + __be16 upper_max_magnitude; + + u8 lower_max_index; + u8 upper_max_index; + + u8 lower_bitmap_weight; + u8 upper_bitmap_weight; + + u8 max_exp; + + u8 data[SPECTRAL_HT20_40_NUM_BINS]; +} __packed; + +struct fft_sample_ath10k { + struct fft_sample_tlv tlv; + u8 chan_width_mhz; + __be16 freq1; + __be16 freq2; + __be16 noise; + __be16 max_magnitude; + __be16 total_gain_db; + __be16 base_pwr_db; + __be64 tsf; + s8 max_index; + u8 rssi; + u8 relpwr_db; + u8 avgpwr_db; + u8 max_exp; + + u8 data[0]; +} __packed; + +#endif /* SPECTRAL_COMMON_H */ diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 4ac2c208c9ba..a00f31881df9 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -311,8 +311,10 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) + cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0])); - if (rc) + if (rc) { + del_timer_sync(&wil->scan_timer); wil->scan_request = NULL; + } return rc; } diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 8f66186adb8c..b1c6a7293390 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -22,6 +22,7 @@ #include <linux/power_supply.h> #include "wil6210.h" +#include "wmi.h" #include "txrx.h" /* Nasty hack. 
Better have per device instances */ @@ -29,6 +30,21 @@ static u32 mem_addr; static u32 dbg_txdesc_index; static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ +enum dbg_off_type { + doff_u32 = 0, + doff_x32 = 1, + doff_ulong = 2, + doff_io32 = 3, +}; + +/* offset to "wil" */ +struct dbg_off { + const char *name; + umode_t mode; + ulong off; + enum dbg_off_type type; +}; + static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, const char *name, struct vring *vring, char _s, char _h) @@ -244,9 +260,9 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get, static struct dentry *wil_debugfs_create_iomem_x32(const char *name, umode_t mode, struct dentry *parent, - void __iomem *value) + void *value) { - return debugfs_create_file(name, mode, parent, (void * __force)value, + return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32); } @@ -270,6 +286,59 @@ static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode, return debugfs_create_file(name, mode, parent, value, &wil_fops_ulong); } +/** + * wil6210_debugfs_init_offset - create a set of debugfs files + * @wil: driver's context, used for printing + * @dbg: directory on the debugfs where the files will be created + * @base: base address used in address calculation + * @tbl: table of file descriptions; must be terminated with an empty element + * + * Creates files according to @tbl. + */ +static void wil6210_debugfs_init_offset(struct wil6210_priv *wil, + struct dentry *dbg, void *base, + const struct dbg_off * const tbl) +{ + int i; + + for (i = 0; tbl[i].name; i++) { + struct dentry *f = NULL; + + switch (tbl[i].type) { + case doff_u32: + f = debugfs_create_u32(tbl[i].name, tbl[i].mode, dbg, + base + tbl[i].off); + break; + case doff_x32: + f = debugfs_create_x32(tbl[i].name, tbl[i].mode, dbg, + base + tbl[i].off); + break; + case doff_ulong: + f = wil_debugfs_create_ulong(tbl[i].name, tbl[i].mode, + dbg, base + tbl[i].off); + break; + case doff_io32: + f = wil_debugfs_create_iomem_x32(tbl[i].name, + tbl[i].mode, dbg, + base + tbl[i].off); + break; + } + if (IS_ERR_OR_NULL(f)) + wil_err(wil, "Create file \"%s\": err %ld\n", + tbl[i].name, PTR_ERR(f)); + } +} + +static const struct dbg_off isr_off[] = { + {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32}, + {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32}, + {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32}, + {"ICS", S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32}, + {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32}, + {"IMS", S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32}, + {"IMC", S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32}, + {}, +}; static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil, const char *name, struct dentry *parent, u32 off) @@ -279,24 +348,19 @@ static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil, if (IS_ERR_OR_NULL(d)) return -ENODEV; - wil_debugfs_create_iomem_x32("ICC", S_IRUGO | S_IWUSR, d, - wil->csr + off); - wil_debugfs_create_iomem_x32("ICR", S_IRUGO | S_IWUSR, d, - wil->csr + off + 4); - wil_debugfs_create_iomem_x32("ICM", S_IRUGO | S_IWUSR, d, - wil->csr + off + 8); - wil_debugfs_create_iomem_x32("ICS", S_IWUSR, d, - wil->csr + off + 12); - wil_debugfs_create_iomem_x32("IMV", S_IRUGO | S_IWUSR, d, - wil->csr + off + 16); - wil_debugfs_create_iomem_x32("IMS", S_IWUSR, d, - wil->csr + off + 20); - wil_debugfs_create_iomem_x32("IMC", S_IWUSR, d, - wil->csr + off + 24); + 
wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr + off, + isr_off); return 0; } +static const struct dbg_off pseudo_isr_off[] = { + {"CAUSE", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32}, + {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32}, + {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32}, + {}, +}; + static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil, struct dentry *parent) { @@ -305,16 +369,19 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil, if (IS_ERR_OR_NULL(d)) return -ENODEV; - wil_debugfs_create_iomem_x32("CAUSE", S_IRUGO, d, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE)); - wil_debugfs_create_iomem_x32("MASK_SW", S_IRUGO, d, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); - wil_debugfs_create_iomem_x32("MASK_FW", S_IRUGO, d, wil->csr + - HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW)); + wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr, + pseudo_isr_off); return 0; } +static const struct dbg_off itr_cnt_off[] = { + {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32}, + {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32}, + {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32}, + {}, +}; + static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil, struct dentry *parent) { @@ -323,12 +390,8 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil, if (IS_ERR_OR_NULL(d)) return -ENODEV; - wil_debugfs_create_iomem_x32("TRSH", S_IRUGO | S_IWUSR, d, wil->csr + - HOSTADDR(RGF_DMA_ITR_CNT_TRSH)); - wil_debugfs_create_iomem_x32("DATA", S_IRUGO | S_IWUSR, d, wil->csr + - HOSTADDR(RGF_DMA_ITR_CNT_DATA)); - wil_debugfs_create_iomem_x32("CTL", S_IRUGO | S_IWUSR, d, wil->csr + - HOSTADDR(RGF_DMA_ITR_CNT_CRL)); + wil6210_debugfs_init_offset(wil, d, (void * __force)wil->csr, + itr_cnt_off); return 0; } @@ -666,16 +729,79 @@ static const struct file_operations fops_txdesc = { }; /*---------beamforming------------*/ +static char *wil_bfstatus_str(u32 status) +{ + switch (status) { + case 0: + return "Failed"; + case 1: + return "OK"; + case 2: + return "Retrying"; + default: + return "??"; + } +} + +static bool is_all_zeros(void * const x_, size_t sz) +{ + /* if reply is all-0, ignore this CID */ + u32 *x = x_; + int n; + + for (n = 0; n < sz / sizeof(*x); n++) + if (x[n]) + return false; + + return true; +} + static int wil_bf_debugfs_show(struct seq_file *s, void *data) { + int rc; + int i; struct wil6210_priv *wil = s->private; - seq_printf(s, - "TSF : 0x%016llx\n" - "TxMCS : %d\n" - "Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n", - wil->stats.tsf, wil->stats.bf_mcs, - wil->stats.my_rx_sector, wil->stats.my_tx_sector, - wil->stats.peer_rx_sector, wil->stats.peer_tx_sector); + struct wmi_notify_req_cmd cmd = { + .interval_usec = 0, + }; + struct { + struct wil6210_mbox_hdr_wmi wmi; + struct wmi_notify_req_done_event evt; + } __packed reply; + + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + u32 status; + + cmd.cid = i; + rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, &cmd, sizeof(cmd), + WMI_NOTIFY_REQ_DONE_EVENTID, &reply, + sizeof(reply), 20); + /* if reply is all-0, ignore this CID */ + if (rc || is_all_zeros(&reply.evt, sizeof(reply.evt))) + continue; + + status = le32_to_cpu(reply.evt.status); + seq_printf(s, "CID %d {\n" + " TSF = 0x%016llx\n" + " TxMCS = %2d TxTpt = %4d\n" + " SQI = %4d\n" + " Status = 0x%08x %s\n" + " Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n" + " Goodput(rx:tx) %4d:%4d\n" + "}\n", + i, + 
le64_to_cpu(reply.evt.tsf), + le16_to_cpu(reply.evt.bf_mcs), + le32_to_cpu(reply.evt.tx_tpt), + reply.evt.sqi, + status, wil_bfstatus_str(status), + le16_to_cpu(reply.evt.my_rx_sector), + le16_to_cpu(reply.evt.my_tx_sector), + le16_to_cpu(reply.evt.other_rx_sector), + le16_to_cpu(reply.evt.other_tx_sector), + le32_to_cpu(reply.evt.rx_goodput), + le32_to_cpu(reply.evt.tx_goodput)); + } return 0; } @@ -985,6 +1111,87 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil, } } +/* misc files */ +static const struct { + const char *name; + umode_t mode; + const struct file_operations *fops; +} dbg_files[] = { + {"mbox", S_IRUGO, &fops_mbox}, + {"vrings", S_IRUGO, &fops_vring}, + {"stations", S_IRUGO, &fops_sta}, + {"desc", S_IRUGO, &fops_txdesc}, + {"bf", S_IRUGO, &fops_bf}, + {"ssid", S_IRUGO | S_IWUSR, &fops_ssid}, + {"mem_val", S_IRUGO, &fops_memread}, + {"reset", S_IWUSR, &fops_reset}, + {"rxon", S_IWUSR, &fops_rxon}, + {"tx_mgmt", S_IWUSR, &fops_txmgmt}, + {"wmi_send", S_IWUSR, &fops_wmi}, + {"temp", S_IRUGO, &fops_temp}, + {"freq", S_IRUGO, &fops_freq}, + {"link", S_IRUGO, &fops_link}, + {"info", S_IRUGO, &fops_info}, +}; + +static void wil6210_debugfs_init_files(struct wil6210_priv *wil, + struct dentry *dbg) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dbg_files); i++) + debugfs_create_file(dbg_files[i].name, dbg_files[i].mode, dbg, + wil, dbg_files[i].fops); +} + +/* interrupt control blocks */ +static const struct { + const char *name; + u32 icr_off; +} dbg_icr[] = { + {"USER_ICR", HOSTADDR(RGF_USER_USER_ICR)}, + {"DMA_EP_TX_ICR", HOSTADDR(RGF_DMA_EP_TX_ICR)}, + {"DMA_EP_RX_ICR", HOSTADDR(RGF_DMA_EP_RX_ICR)}, + {"DMA_EP_MISC_ICR", HOSTADDR(RGF_DMA_EP_MISC_ICR)}, +}; + +static void wil6210_debugfs_init_isr(struct wil6210_priv *wil, + struct dentry *dbg) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dbg_icr); i++) + wil6210_debugfs_create_ISR(wil, dbg_icr[i].name, dbg, + dbg_icr[i].icr_off); +} + +#define WIL_FIELD(name, mode, type) { __stringify(name), mode, \ + offsetof(struct wil6210_priv, name), type} + +/* fields in struct wil6210_priv */ +static const struct dbg_off dbg_wil_off[] = { + WIL_FIELD(secure_pcp, S_IRUGO | S_IWUSR, doff_u32), + WIL_FIELD(status, S_IRUGO | S_IWUSR, doff_ulong), + WIL_FIELD(fw_version, S_IRUGO, doff_u32), + WIL_FIELD(hw_version, S_IRUGO, doff_x32), + {}, +}; + +static const struct dbg_off dbg_wil_regs[] = { + {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0), + doff_io32}, + {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32}, + {}, +}; + +/* static parameters */ +static const struct dbg_off dbg_statics[] = { + {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32}, + {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32}, + {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32}, + {}, +}; + int wil6210_debugfs_init(struct wil6210_priv *wil) { struct dentry *dbg = wil->debug = debugfs_create_dir(WIL_NAME, @@ -993,51 +1200,17 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) if (IS_ERR_OR_NULL(dbg)) return -ENODEV; - debugfs_create_file("mbox", S_IRUGO, dbg, wil, &fops_mbox); - debugfs_create_file("vrings", S_IRUGO, dbg, wil, &fops_vring); - debugfs_create_file("stations", S_IRUGO, dbg, wil, &fops_sta); - debugfs_create_file("desc", S_IRUGO, dbg, wil, &fops_txdesc); - debugfs_create_u32("desc_index", S_IRUGO | S_IWUSR, dbg, - &dbg_txdesc_index); - debugfs_create_u32("vring_index", S_IRUGO | S_IWUSR, dbg, - &dbg_vring_index); - - debugfs_create_file("bf", S_IRUGO, dbg, wil, 
&fops_bf); - debugfs_create_file("ssid", S_IRUGO | S_IWUSR, dbg, wil, &fops_ssid); - debugfs_create_u32("secure_pcp", S_IRUGO | S_IWUSR, dbg, - &wil->secure_pcp); - wil_debugfs_create_ulong("status", S_IRUGO | S_IWUSR, dbg, - &wil->status); - debugfs_create_u32("fw_version", S_IRUGO, dbg, &wil->fw_version); - debugfs_create_x32("hw_version", S_IRUGO, dbg, &wil->hw_version); - - wil6210_debugfs_create_ISR(wil, "USER_ICR", dbg, - HOSTADDR(RGF_USER_USER_ICR)); - wil6210_debugfs_create_ISR(wil, "DMA_EP_TX_ICR", dbg, - HOSTADDR(RGF_DMA_EP_TX_ICR)); - wil6210_debugfs_create_ISR(wil, "DMA_EP_RX_ICR", dbg, - HOSTADDR(RGF_DMA_EP_RX_ICR)); - wil6210_debugfs_create_ISR(wil, "DMA_EP_MISC_ICR", dbg, - HOSTADDR(RGF_DMA_EP_MISC_ICR)); - wil6210_debugfs_create_pseudo_ISR(wil, dbg); - wil6210_debugfs_create_ITR_CNT(wil, dbg); + wil6210_debugfs_init_files(wil, dbg); + wil6210_debugfs_init_isr(wil, dbg); + wil6210_debugfs_init_blobs(wil, dbg); + wil6210_debugfs_init_offset(wil, dbg, wil, dbg_wil_off); + wil6210_debugfs_init_offset(wil, dbg, (void * __force)wil->csr, + dbg_wil_regs); + wil6210_debugfs_init_offset(wil, dbg, NULL, dbg_statics); - wil_debugfs_create_iomem_x32("RGF_USER_USAGE_1", S_IRUGO, dbg, - wil->csr + - HOSTADDR(RGF_USER_USAGE_1)); - debugfs_create_u32("mem_addr", S_IRUGO | S_IWUSR, dbg, &mem_addr); - debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread); - - debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset); - debugfs_create_file("rxon", S_IWUSR, dbg, wil, &fops_rxon); - debugfs_create_file("tx_mgmt", S_IWUSR, dbg, wil, &fops_txmgmt); - debugfs_create_file("wmi_send", S_IWUSR, dbg, wil, &fops_wmi); - debugfs_create_file("temp", S_IRUGO, dbg, wil, &fops_temp); - debugfs_create_file("freq", S_IRUGO, dbg, wil, &fops_freq); - debugfs_create_file("link", S_IRUGO, dbg, wil, &fops_link); - debugfs_create_file("info", S_IRUGO, dbg, wil, &fops_info); + wil6210_debugfs_create_pseudo_ISR(wil, dbg); - wil6210_debugfs_init_blobs(wil, dbg); + wil6210_debugfs_create_ITR_CNT(wil, dbg); return 0; } diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 67f1002a03a1..98bfbb6390b7 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 3704d2a434f3..b69d90f0716f 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 
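
The hunk above replaces fifteen individual debugfs_create_file() calls with a dbg_files[] table walked by one loop, so adding a file becomes a one-line table entry. A minimal userspace sketch of the same table-driven registration, with stand-in names and handlers that are illustrative only, not the driver's:

    #include <stddef.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct dbg_file {
            const char *name;
            unsigned int mode;
            int (*show)(void *ctx);         /* per-file handler */
    };

    static int show_mbox(void *ctx)   { (void)ctx; return 0; }
    static int show_vrings(void *ctx) { (void)ctx; return 0; }

    static const struct dbg_file dbg_files[] = {
            { "mbox",   0444, show_mbox },
            { "vrings", 0444, show_vrings },
    };

    static void register_all(void)
    {
            size_t i;

            /* one loop instead of one call per file */
            for (i = 0; i < ARRAY_SIZE(dbg_files); i++)
                    printf("register %s, mode %o\n",
                           dbg_files[i].name, dbg_files[i].mode);
    }

The dbg_off tables in the same hunk use the other common convention, a zeroed {} sentinel entry, which lets wil6210_debugfs_init_offset() walk them without being told their length.
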
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -25,6 +25,9 @@ static bool no_fw_recovery; module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(no_fw_recovery, " disable FW error recovery"); +#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */ +#define RST_COUNT (1 + 1000/RST_DELAY) /* round up to be above 1 sec total */ + /* * Due to a hardware issue, * one has to read/write to/from NIC in 32-bit chunks; @@ -309,7 +312,7 @@ void wil_priv_deinit(struct wil6210_priv *wil) destroy_workqueue(wil->wmi_wq); } -static void wil_target_reset(struct wil6210_priv *wil) +static int wil_target_reset(struct wil6210_priv *wil) { int delay = 0; u32 hw_state; @@ -327,6 +330,8 @@ static void wil_target_reset(struct wil6210_priv *wil) /* register clear = read, AND with inverted, write */ #define C(a, v) W(a, R(a) & ~v) + wmb(); /* If host reorder writes here -> race in NIC */ + W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */ wil->hw_version = R(RGF_USER_FW_REV_ID); rev_id = wil->hw_version & 0xff; @@ -343,8 +348,9 @@ static void wil_target_reset(struct wil6210_priv *wil) wmb(); /* order is important here */ } - W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */ W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */ + wmb(); /* If host reorder writes here -> race in NIC */ + W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */ wmb(); /* order is important here */ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); @@ -385,14 +391,14 @@ static void wil_target_reset(struct wil6210_priv *wil) W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); wmb(); /* order is important here */ - /* wait until device ready */ + /* wait until device ready. typical time is 200..250 msec */ do { - msleep(1); + msleep(RST_DELAY); hw_state = R(RGF_USER_HW_MACHINE_STATE); - if (delay++ > 100) { + if (delay++ > RST_COUNT) { wil_err(wil, "Reset not completed, hw_state 0x%08x\n", hw_state); - return; + return -ETIME; } } while (hw_state != HW_MACHINE_BOOT_DONE); @@ -403,7 +409,8 @@ static void wil_target_reset(struct wil6210_priv *wil) C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD); wmb(); /* order is important here */ - wil_dbg_misc(wil, "Reset completed in %d ms\n", delay); + wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY); + return 0; #undef R #undef W @@ -468,10 +475,11 @@ int wil_reset(struct wil6210_priv *wil) flush_workqueue(wil->wmi_wq_conn); flush_workqueue(wil->wmi_wq); - /* TODO: put MAC in reset */ - wil_target_reset(wil); - + rc = wil_target_reset(wil); wil_rx_fini(wil); + if (rc) + return rc; + /* init after reset */ wil->pending_connect_cid = -1; diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 7afce6e8c507..a44c2b61be08 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 
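
Also above: wil_target_reset() now polls RGF_USER_HW_MACHINE_STATE every RST_DELAY msec and gives up after RST_COUNT iterations, returning -ETIME instead of void so wil_reset() can propagate the failure. The bounded-poll shape in isolation, with read_state() and sleep_ms() as stand-ins for the register read and msleep():

    #include <errno.h>

    #define RST_DELAY_MS    20
    #define RST_COUNT       (1 + 1000 / RST_DELAY_MS)   /* >= 1 sec total budget */

    /* returns 0 once read_state() reports boot_done, -ETIME on timeout */
    static int wait_for_boot(unsigned int (*read_state)(void),
                             void (*sleep_ms)(unsigned int),
                             unsigned int boot_done)
    {
            int tries = 0;
            unsigned int state;

            do {
                    sleep_ms(RST_DELAY_MS);
                    state = read_state();
                    if (++tries > RST_COUNT)
                            return -ETIME;  /* caller can now fail the reset */
            } while (state != boot_done);

            return 0;
    }

Deriving RST_COUNT from RST_DELAY keeps the overall budget near one second even if the poll period is retuned later.
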
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -168,11 +168,15 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr) void wil_if_free(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); + if (!ndev) return; - free_netdev(ndev); wil_priv_deinit(wil); + + wil_to_ndev(wil) = NULL; + free_netdev(ndev); + wil_wdev_free(wil); } diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index d3fbfa28db62..38dcbea49d44 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -218,12 +218,13 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void wil_pcie_remove(struct pci_dev *pdev) { struct wil6210_priv *wil = pci_get_drvdata(pdev); + void __iomem *csr = wil->csr; wil6210_debugfs_remove(wil); wil_if_pcie_disable(wil); wil_if_remove(wil); wil_if_free(wil); - pci_iounmap(pdev, wil->csr); + pci_iounmap(pdev, csr); pci_release_region(pdev, 0); pci_disable_device(pdev); } @@ -243,6 +244,8 @@ static const struct pci_device_id wil6210_pcie_ids[] = { .driver_data = (kernel_ulong_t)&wil_board_marlon }, { PCI_DEVICE(0x1ae9, 0x0310), .driver_data = (kernel_ulong_t)&wil_board_sparrow }, + { PCI_DEVICE(0x1ae9, 0x0302), /* same as above, firmware broken */ + .driver_data = (kernel_ulong_t)&wil_board_sparrow }, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 180ca4793904..97c6a24716a1 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -1,3 +1,19 @@ +/* + * Copyright (c) 2014 Qualcomm Atheros, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + #include "wil6210.h" #include "txrx.h" diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index d3467943d39d..9bd920d272bb 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 
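
The wil_pcie_remove() hunk above is an ordering fix: wil_if_free() now ends up releasing the structure that owns ->csr, so the mapping cookie must be copied to a local before teardown and unmapped afterwards. The same shape reduced to plain C, with free() and unmap() standing in for wil_if_free() and pci_iounmap():

    #include <stdlib.h>

    struct priv {
            void *csr;              /* MMIO cookie owned by priv */
    };

    static void unmap(void *csr)
    {
            (void)csr;              /* stand-in for pci_iounmap() */
    }

    static void remove_dev(struct priv *priv)
    {
            void *csr = priv->csr;  /* capture before the owner is freed */

            free(priv);             /* analogous to wil_if_free() */
            unmap(csr);             /* uses the saved copy, not priv->csr */
    }
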
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -414,7 +414,6 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, cid = wil_rxdesc_cid(d); stats = &wil->sta[cid].stats; stats->last_mcs_rx = wil_rxdesc_mcs(d); - wil->stats.last_mcs_rx = stats->last_mcs_rx; /* use radiotap header only if required */ if (ndev->type == ARPHRD_IEEE80211_RADIOTAP) diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index bc5706a4f007..a1ac4f87d47b 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 67e9624f7111..f8718fe547c4 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -133,6 +133,9 @@ struct RGF_ICR { #define RGF_HP_CTRL (0x88265c) #define RGF_PCIE_LOS_COUNTER_CTL (0x882dc4) +/* MAC timer, usec, for packet lifetime */ +#define RGF_MAC_MTRL_COUNTER_0 (0x886aa8) + /* popular locations */ #define HOST_MBOX HOSTADDR(RGF_USER_USER_SCRATCH_PAD) #define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \ @@ -327,17 +330,6 @@ struct wil_tid_ampdu_rx { bool first_time; /* is it 1-st time this buffer used? */ }; -struct wil6210_stats { - u64 tsf; - u32 snr; - u16 last_mcs_rx; - u16 bf_mcs; /* last BF, used for Tx */ - u16 my_rx_sector; - u16 my_tx_sector; - u16 peer_rx_sector; - u16 peer_tx_sector; -}; - enum wil_sta_status { wil_sta_unused = 0, wil_sta_conn_pending = 1, @@ -430,7 +422,6 @@ struct wil6210_priv { struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */ /* statistics */ - struct wil6210_stats stats; atomic_t isr_count_rx, isr_count_tx; /* debugfs */ struct dentry *debug; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 1d1d0afdd2e1..b1aaaee997d5 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -14,6 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
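
This hunk finishes the move from the global wil6210_stats to per-station counters: last_mcs_rx is now recorded only in wil->sta[cid].stats, indexed by the CID the rx descriptor already carries. Roughly (the bounds check below is the sketch's defensive addition, not the driver's):

    #include <stdint.h>

    #define MAX_CID 8       /* WIL6210_MAX_CID stand-in */

    struct sta_stats {
            uint16_t last_mcs_rx;
    };

    struct priv {
            struct sta_stats sta[MAX_CID];  /* one entry per connection id */
    };

    static void note_rx_mcs(struct priv *p, unsigned int cid, uint16_t mcs)
    {
            if (cid < MAX_CID)      /* cid comes from a descriptor field */
                    p->sta[cid].last_mcs_rx = mcs;
    }
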
*/ +#include <linux/moduleparam.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> @@ -22,6 +23,10 @@ #include "wmi.h" #include "trace.h" +static uint max_assoc_sta = 1; +module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP"); + /** * WMI event receiving - theory of operations * @@ -346,11 +351,11 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) rx_mgmt_frame->bssid); cfg80211_put_bss(wiphy, bss); } else { - wil_err(wil, "cfg80211_inform_bss() failed\n"); + wil_err(wil, "cfg80211_inform_bss_frame() failed\n"); } } else { cfg80211_rx_mgmt(wil->wdev, freq, signal, - (void *)rx_mgmt_frame, d_len, 0, GFP_KERNEL); + (void *)rx_mgmt_frame, d_len, 0); } } @@ -482,33 +487,6 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, mutex_unlock(&wil->mutex); } -static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len) -{ - struct wmi_notify_req_done_event *evt = d; - - if (len < sizeof(*evt)) { - wil_err(wil, "Short NOTIFY event\n"); - return; - } - - wil->stats.tsf = le64_to_cpu(evt->tsf); - wil->stats.snr = le32_to_cpu(evt->snr_val); - wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs); - wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector); - wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector); - wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector); - wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector); - wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n" - "BF status 0x%08x SNR 0x%08x SQI %d%%\n" - "Tx Tpt %d goodput %d Rx goodput %d\n" - "Sectors(rx:tx) my %d:%d peer %d:%d\n", - wil->stats.bf_mcs, wil->stats.tsf, evt->status, - wil->stats.snr, evt->sqi, le32_to_cpu(evt->tx_tpt), - le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput), - wil->stats.my_rx_sector, wil->stats.my_tx_sector, - wil->stats.peer_rx_sector, wil->stats.peer_tx_sector); -} - /* * Firmware reports EAPOL frame using WME event. * Reconstruct Ethernet frame and deliver it via normal Rx @@ -651,7 +629,6 @@ static const struct { {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete}, {WMI_CONNECT_EVENTID, wmi_evt_connect}, {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, - {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify}, {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx}, {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup}, {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown}, @@ -822,7 +799,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) .network_type = wmi_nettype, .disable_sec_offload = 1, .channel = chan - 1, - .pcp_max_assoc_sta = WIL6210_MAX_CID, + .pcp_max_assoc_sta = max_assoc_sta, }; struct { struct wil6210_mbox_hdr_wmi wmi; @@ -832,6 +809,14 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) if (!wil->secure_pcp) cmd.disable_sec = 1; + if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) || + (cmd.pcp_max_assoc_sta <= 0)) { + wil_info(wil, + "Requested connection limit %u, valid values are 1 - %d. Setting to %d\n", + max_assoc_sta, WIL6210_MAX_CID, WIL6210_MAX_CID); + cmd.pcp_max_assoc_sta = WIL6210_MAX_CID; + } + /* * Processing time may be huge, in case of secure AP it takes about * 3500ms for FW to start AP diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 17334c852866..061618cee9f6 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012 Qualcomm Atheros, Inc. 
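
max_assoc_sta above is writable at runtime (S_IWUSR), so wmi_pcp_start() validates it at the point of use and falls back to WIL6210_MAX_CID when it is out of range. The validate-on-use pattern in miniature, with MAX_CID as a stand-in constant:

    #include <stdio.h>

    #define MAX_CID 8       /* WIL6210_MAX_CID stand-in */

    static unsigned int max_assoc_sta = 1; /* module parameter, runtime-writable */

    static unsigned int effective_sta_limit(void)
    {
            unsigned int limit = max_assoc_sta;     /* read once */

            if (limit < 1 || limit > MAX_CID) {
                    fprintf(stderr,
                            "requested limit %u out of 1..%d, using %d\n",
                            limit, MAX_CID, MAX_CID);
                    limit = MAX_CID;
            }
            return limit;
    }

Copying the parameter into the command struct first, as the driver does, also pins one consistent value for the rest of the call.
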
+ * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. * Copyright (c) 2006-2012 Wilocity . * * Permission to use, copy, modify, and/or distribute this software for any @@ -980,7 +980,7 @@ struct wmi_ready_event { * WMI_NOTIFY_REQ_DONE_EVENTID */ struct wmi_notify_req_done_event { - __le32 status; + __le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */ __le64 tsf; __le32 snr_val; __le32 tx_tpt; diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c index 4cfb4d99ced0..7afc9c5329fb 100644 --- a/drivers/net/wireless/atmel_cs.c +++ b/drivers/net/wireless/atmel_cs.c @@ -66,18 +66,18 @@ static void atmel_release(struct pcmcia_device *link); static void atmel_detach(struct pcmcia_device *p_dev); -typedef struct local_info_t { +struct local_info { struct net_device *eth_dev; -} local_info_t; +}; static int atmel_probe(struct pcmcia_device *p_dev) { - local_info_t *local; + struct local_info *local; dev_dbg(&p_dev->dev, "atmel_attach()\n"); /* Allocate space for private device-specific data */ - local = kzalloc(sizeof(local_info_t), GFP_KERNEL); + local = kzalloc(sizeof(*local), GFP_KERNEL); if (!local) return -ENOMEM; @@ -117,7 +117,7 @@ static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data) static int atmel_config(struct pcmcia_device *link) { - local_info_t *dev; + struct local_info *dev; int ret; const struct pcmcia_device_id *did; @@ -141,14 +141,14 @@ static int atmel_config(struct pcmcia_device *link) if (ret) goto failed; - ((local_info_t*)link->priv)->eth_dev = + ((struct local_info *)link->priv)->eth_dev = init_atmel_card(link->irq, link->resource[0]->start, did ? did->driver_info : ATMEL_FW_TYPE_NONE, &link->dev, card_present, link); - if (!((local_info_t*)link->priv)->eth_dev) + if (!((struct local_info *)link->priv)->eth_dev) goto failed; @@ -161,20 +161,20 @@ static int atmel_config(struct pcmcia_device *link) static void atmel_release(struct pcmcia_device *link) { - struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; + struct net_device *dev = ((struct local_info *)link->priv)->eth_dev; dev_dbg(&link->dev, "atmel_release\n"); if (dev) stop_atmel_card(dev); - ((local_info_t*)link->priv)->eth_dev = NULL; + ((struct local_info *)link->priv)->eth_dev = NULL; pcmcia_disable_device(link); } static int atmel_suspend(struct pcmcia_device *link) { - local_info_t *local = link->priv; + struct local_info *local = link->priv; netif_device_detach(local->eth_dev); @@ -183,7 +183,7 @@ static int atmel_suspend(struct pcmcia_device *link) static int atmel_resume(struct pcmcia_device *link) { - local_info_t *local = link->priv; + struct local_info *local = link->priv; atmel_open(local->eth_dev); netif_device_attach(local->eth_dev); diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile index 6e00b8804ada..9f7965aae93d 100644 --- a/drivers/net/wireless/b43/Makefile +++ b/drivers/net/wireless/b43/Makefile @@ -18,6 +18,7 @@ b43-y += xmit.o b43-y += dma.o b43-y += pio.o b43-y += rfkill.o +b43-y += ppr.o b43-$(CONFIG_B43_LEDS) += leds.o b43-$(CONFIG_B43_PCMCIA) += pcmcia.o b43-$(CONFIG_B43_SDIO) += sdio.o diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index 4113b6934764..95a943326420 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -457,6 +457,7 @@ enum { #define B43_MACCTL_RADIOLOCK 0x00080000 /* Radio lock */ #define B43_MACCTL_BEACPROMISC 0x00100000 /* Beacon Promiscuous */ #define B43_MACCTL_KEEP_BADPLCP 0x00200000 /* Keep frames with bad PLCP 
*/ +#define B43_MACCTL_PHY_LOCK 0x00200000 #define B43_MACCTL_KEEP_CTL 0x00400000 /* Keep control frames */ #define B43_MACCTL_KEEP_BAD 0x00800000 /* Keep bad frames (FCS) */ #define B43_MACCTL_PROMISC 0x01000000 /* Promiscuous mode */ @@ -791,6 +792,13 @@ struct b43_firmware { bool pcm_request_failed; }; +enum b43_band { + B43_BAND_2G = 0, + B43_BAND_5G_LO = 1, + B43_BAND_5G_MI = 2, + B43_BAND_5G_HI = 3, +}; + /* Device (802.11 core) initialization status. */ enum { B43_STAT_UNINIT = 0, /* Uninitialized. */ @@ -1012,6 +1020,16 @@ static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value) dev->dev->write16(dev->dev, offset, value); } +/* To optimize this check for flush_writes on BCM47XX_BCMA only. */ +static inline void b43_write16f(struct b43_wldev *dev, u16 offset, u16 value) +{ + b43_write16(dev, offset, value); +#if defined(CONFIG_BCM47XX_BCMA) + if (dev->dev->flush_writes) + b43_read16(dev, offset); +#endif +} + static inline void b43_maskset16(struct b43_wldev *dev, u16 offset, u16 mask, u16 set) { diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c index 565fdbdd6915..17d16a391fe6 100644 --- a/drivers/net/wireless/b43/bus.c +++ b/drivers/net/wireless/b43/bus.c @@ -22,6 +22,10 @@ */ +#ifdef CONFIG_BCM47XX_BCMA +#include <asm/mach-bcm47xx/bcm47xx.h> +#endif + #include "b43.h" #include "bus.h" @@ -102,6 +106,12 @@ struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core) dev->write32 = b43_bus_bcma_write32; dev->block_read = b43_bus_bcma_block_read; dev->block_write = b43_bus_bcma_block_write; +#ifdef CONFIG_BCM47XX_BCMA + if (b43_bus_host_is_pci(dev) && + bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA && + bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4716) + dev->flush_writes = true; +#endif dev->dev = &core->dev; dev->dma_dev = core->dma_dev; diff --git a/drivers/net/wireless/b43/bus.h b/drivers/net/wireless/b43/bus.h index f3205c6988bc..256c2c17939a 100644 --- a/drivers/net/wireless/b43/bus.h +++ b/drivers/net/wireless/b43/bus.h @@ -33,6 +33,7 @@ struct b43_bus_dev { size_t count, u16 offset, u8 reg_width); void (*block_write)(struct b43_bus_dev *dev, const void *buffer, size_t count, u16 offset, u8 reg_width); + bool flush_writes; struct device *dev; struct device *dma_dev; @@ -60,7 +61,21 @@ static inline bool b43_bus_host_is_pcmcia(struct b43_bus_dev *dev) #else return false; #endif +}; + +static inline bool b43_bus_host_is_pci(struct b43_bus_dev *dev) +{ +#ifdef CONFIG_B43_BCMA + if (dev->bus_type == B43_BUS_BCMA) + return (dev->bdev->bus->hosttype == BCMA_HOSTTYPE_PCI); +#endif +#ifdef CONFIG_B43_SSB + if (dev->bus_type == B43_BUS_SSB) + return (dev->sdev->bus->bustype == SSB_BUSTYPE_PCI); +#endif + return false; } + static inline bool b43_bus_host_is_sdio(struct b43_bus_dev *dev) { #ifdef CONFIG_B43_SSB diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 2af1ac396eb4..66ff718cc412 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -4466,10 +4466,10 @@ static int b43_phy_versioning(struct b43_wldev *dev) if (core_rev == 40 || core_rev == 42) { radio_manuf = 0x17F; - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, 0); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, 0); radio_rev = b43_read16(dev, B43_MMIO_RADIO24_DATA); - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, 1); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, 1); radio_id = b43_read16(dev, B43_MMIO_RADIO24_DATA); radio_ver = 0; /* Is there version somewhere? 
*/ @@ -4477,7 +4477,7 @@ static int b43_phy_versioning(struct b43_wldev *dev) u16 radio24[3]; for (tmp = 0; tmp < 3; tmp++) { - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, tmp); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, tmp); radio24[tmp] = b43_read16(dev, B43_MMIO_RADIO24_DATA); } @@ -4494,13 +4494,12 @@ static int b43_phy_versioning(struct b43_wldev *dev) else tmp = 0x5205017F; } else { - b43_write16(dev, B43_MMIO_RADIO_CONTROL, - B43_RADIOCTL_ID); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, + B43_RADIOCTL_ID); tmp = b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); - b43_write16(dev, B43_MMIO_RADIO_CONTROL, - B43_RADIOCTL_ID); - tmp |= (u32)b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH) - << 16; + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, + B43_RADIOCTL_ID); + tmp |= b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH) << 16; } radio_manuf = (tmp & 0x00000FFF); radio_id = (tmp & 0x0FFFF000) >> 12; diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c index 25e40432d68b..99c036f5ecb7 100644 --- a/drivers/net/wireless/b43/phy_a.c +++ b/drivers/net/wireless/b43/phy_a.c @@ -444,14 +444,14 @@ static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset) static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg) { reg = adjust_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); return b43_read16(dev, B43_MMIO_PHY_DATA); } static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) { reg = adjust_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, value); } diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c index 3cbef21b4726..1dfc682a8055 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/b43/phy_common.c @@ -222,12 +222,18 @@ static inline void assert_mac_suspended(struct b43_wldev *dev) u16 b43_radio_read(struct b43_wldev *dev, u16 reg) { assert_mac_suspended(dev); + dev->phy.writes_counter = 0; return dev->phy.ops->radio_read(dev, reg); } void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { assert_mac_suspended(dev); + if (b43_bus_host_is_pci(dev->dev) && + ++dev->phy.writes_counter > B43_MAX_WRITES_IN_ROW) { + b43_read32(dev, B43_MMIO_MACCTL); + dev->phy.writes_counter = 1; + } dev->phy.ops->radio_write(dev, reg, value); } @@ -268,17 +274,28 @@ u16 b43_phy_read(struct b43_wldev *dev, u16 reg) { assert_mac_suspended(dev); dev->phy.writes_counter = 0; - return dev->phy.ops->phy_read(dev, reg); + + if (dev->phy.ops->phy_read) + return dev->phy.ops->phy_read(dev, reg); + + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); + return b43_read16(dev, B43_MMIO_PHY_DATA); } void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value) { assert_mac_suspended(dev); - dev->phy.ops->phy_write(dev, reg, value); - if (++dev->phy.writes_counter == B43_MAX_WRITES_IN_ROW) { + if (b43_bus_host_is_pci(dev->dev) && + ++dev->phy.writes_counter > B43_MAX_WRITES_IN_ROW) { b43_read16(dev, B43_MMIO_PHY_VER); - dev->phy.writes_counter = 0; + dev->phy.writes_counter = 1; } + + if (dev->phy.ops->phy_write) + return dev->phy.ops->phy_write(dev, reg, value); + + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16(dev, B43_MMIO_PHY_DATA, value); } void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg) diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index 8f5c14bc10e6..727ce6edb4b3 100644 --- 
a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -2555,13 +2555,13 @@ static void b43_gphy_op_exit(struct b43_wldev *dev) static u16 b43_gphy_op_read(struct b43_wldev *dev, u16 reg) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); return b43_read16(dev, B43_MMIO_PHY_DATA); } static void b43_gphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, value); } @@ -2572,7 +2572,7 @@ static u16 b43_gphy_op_radio_read(struct b43_wldev *dev, u16 reg) /* G-PHY needs 0x80 for read access. */ reg |= 0x80; - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } @@ -2581,7 +2581,7 @@ static void b43_gphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) /* Register 1 is a 32-bit register. */ B43_WARN_ON(reg == 1); - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } diff --git a/drivers/net/wireless/b43/phy_ht.c b/drivers/net/wireless/b43/phy_ht.c index f2974c6b1c01..c4dc8b020f60 100644 --- a/drivers/net/wireless/b43/phy_ht.c +++ b/drivers/net/wireless/b43/phy_ht.c @@ -1071,22 +1071,10 @@ static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev) * R/W ops. **************************************************/ -static u16 b43_phy_ht_op_read(struct b43_wldev *dev, u16 reg) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - return b43_read16(dev, B43_MMIO_PHY_DATA); -} - -static void b43_phy_ht_op_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - b43_write16(dev, B43_MMIO_PHY_DATA, value); -} - static void b43_phy_ht_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set); } @@ -1096,14 +1084,14 @@ static u16 b43_phy_ht_op_radio_read(struct b43_wldev *dev, u16 reg) /* HT-PHY needs 0x200 for read access */ reg |= 0x200; - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO24_DATA); } static void b43_phy_ht_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO24_DATA, value); } @@ -1126,8 +1114,6 @@ const struct b43_phy_operations b43_phyops_ht = { .free = b43_phy_ht_op_free, .prepare_structs = b43_phy_ht_op_prepare_structs, .init = b43_phy_ht_op_init, - .phy_read = b43_phy_ht_op_read, - .phy_write = b43_phy_ht_op_write, .phy_maskset = b43_phy_ht_op_maskset, .radio_read = b43_phy_ht_op_radio_read, .radio_write = b43_phy_ht_op_radio_write, diff --git a/drivers/net/wireless/b43/phy_lcn.c b/drivers/net/wireless/b43/phy_lcn.c index e76bbdf3247e..97461ccf3e1e 100644 --- a/drivers/net/wireless/b43/phy_lcn.c +++ b/drivers/net/wireless/b43/phy_lcn.c @@ -810,22 +810,10 @@ static void b43_phy_lcn_op_adjust_txpower(struct b43_wldev *dev) * R/W ops. 
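
Most of the b43 churn above funnels PHY and radio CONTROL writes through the new b43_write16f(), which on hosts that buffer MMIO writes (the BCM4716 PCIe case flagged by flush_writes) reads the register back so the posted write completes before the paired DATA access. The idea in isolation, against a plain volatile register window:

    #include <stdint.h>
    #include <stdbool.h>

    struct bus_dev {
            volatile uint16_t *regs;
            bool flush_writes;      /* set only for the affected host bridge */
    };

    static void write16(struct bus_dev *d, unsigned int off, uint16_t val)
    {
            d->regs[off] = val;
    }

    static uint16_t read16(struct bus_dev *d, unsigned int off)
    {
            return d->regs[off];
    }

    /* flushing write: a read on the same path cannot complete until
     * earlier buffered writes have been pushed out */
    static void write16f(struct bus_dev *d, unsigned int off, uint16_t val)
    {
            write16(d, off, val);
            if (d->flush_writes)
                    (void)read16(d, off);
    }

Only the CONTROL half of each CONTROL/DATA register pair needs the flush; the DATA access that follows depends on the CONTROL write having landed.
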
**************************************************/ -static u16 b43_phy_lcn_op_read(struct b43_wldev *dev, u16 reg) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - return b43_read16(dev, B43_MMIO_PHY_DATA); -} - -static void b43_phy_lcn_op_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - b43_write16(dev, B43_MMIO_PHY_DATA, value); -} - static void b43_phy_lcn_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set); } @@ -835,14 +823,14 @@ static u16 b43_phy_lcn_op_radio_read(struct b43_wldev *dev, u16 reg) /* LCN-PHY needs 0x200 for read access */ reg |= 0x200; - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO24_DATA); } static void b43_phy_lcn_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { - b43_write16(dev, B43_MMIO_RADIO24_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO24_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO24_DATA, value); } @@ -855,8 +843,6 @@ const struct b43_phy_operations b43_phyops_lcn = { .free = b43_phy_lcn_op_free, .prepare_structs = b43_phy_lcn_op_prepare_structs, .init = b43_phy_lcn_op_init, - .phy_read = b43_phy_lcn_op_read, - .phy_write = b43_phy_lcn_op_write, .phy_maskset = b43_phy_lcn_op_maskset, .radio_read = b43_phy_lcn_op_radio_read, .radio_write = b43_phy_lcn_op_radio_write, diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c index 92190dacf689..058a9f232050 100644 --- a/drivers/net/wireless/b43/phy_lp.c +++ b/drivers/net/wireless/b43/phy_lp.c @@ -1985,22 +1985,10 @@ static void lpphy_calibration(struct b43_wldev *dev) b43_mac_enable(dev); } -static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - return b43_read16(dev, B43_MMIO_PHY_DATA); -} - -static void b43_lpphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - b43_write16(dev, B43_MMIO_PHY_DATA, value); -} - static void b43_lpphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set); } @@ -2016,7 +2004,7 @@ static u16 b43_lpphy_op_radio_read(struct b43_wldev *dev, u16 reg) } else reg |= 0x200; - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } @@ -2025,7 +2013,7 @@ static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) /* Register 1 is a 32-bit register. 
*/ B43_WARN_ON(reg == 1); - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } @@ -2713,8 +2701,6 @@ const struct b43_phy_operations b43_phyops_lp = { .free = b43_lpphy_op_free, .prepare_structs = b43_lpphy_op_prepare_structs, .init = b43_lpphy_op_init, - .phy_read = b43_lpphy_op_read, - .phy_write = b43_lpphy_op_write, .phy_maskset = b43_lpphy_op_maskset, .radio_read = b43_lpphy_op_radio_read, .radio_write = b43_lpphy_op_radio_write, diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index e2a3f0d5bcc2..cf625d8732a7 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -34,6 +34,7 @@ #include "radio_2056.h" #include "radio_2057.h" #include "main.h" +#include "ppr.h" struct nphy_txgains { u16 tx_lpf[2]; @@ -3606,16 +3607,6 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, * Tx and Rx **************************************************/ -static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev) -{//TODO -} - -static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, - bool ignore_tssi) -{//TODO - return B43_TXPWR_RES_DONE; -} - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) { @@ -4069,6 +4060,7 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev) s16 a1[2], b0[2], b1[2]; u8 idle[2]; + u8 ppr_max; s8 target[2]; s32 num, den, pwr; u32 regval[64]; @@ -4147,7 +4139,12 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev) b1[0] = b1[1] = -1393; } } - /* target[0] = target[1] = nphy->tx_power_max; */ + + ppr_max = b43_ppr_get_max(dev, &nphy->tx_pwr_max_ppr); + if (ppr_max) { + target[0] = ppr_max; + target[1] = ppr_max; + } if (dev->phy.rev >= 3) { if (sprom->fem.ghz2.tssipos) @@ -4235,8 +4232,9 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) const u32 *table = NULL; u32 rfpwr_offset; - u8 pga_gain; + u8 pga_gain, pad_gain; int i; + const s16 *uninitialized_var(rf_pwr_offset_table); table = b43_nphy_get_tx_gain_table(dev); if (!table) @@ -4252,13 +4250,27 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) nphy->gmval = (table[0] >> 16) & 0x7000; #endif + if (phy->rev >= 19) { + return; + } else if (phy->rev >= 7) { + rf_pwr_offset_table = b43_ntab_get_rf_pwr_offset_table(dev); + if (!rf_pwr_offset_table) + return; + /* TODO: Enable this once we have gains configured */ + return; + } + for (i = 0; i < 128; i++) { if (phy->rev >= 19) { /* TODO */ return; } else if (phy->rev >= 7) { - /* TODO */ - return; + pga_gain = (table[i] >> 24) & 0xf; + pad_gain = (table[i] >> 19) & 0x1f; + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + rfpwr_offset = rf_pwr_offset_table[pad_gain]; + else + rfpwr_offset = rf_pwr_offset_table[pga_gain]; } else { pga_gain = (table[i] >> 24) & 0xF; if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) @@ -5874,6 +5886,69 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask) b43_mac_enable(dev); } +static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, + bool ignore_tssi) +{ + struct b43_phy *phy = &dev->phy; + struct b43_phy_n *nphy = dev->phy.n; + struct ieee80211_channel *channel = dev->wl->hw->conf.chandef.chan; + struct b43_ppr *ppr = &nphy->tx_pwr_max_ppr; + u8 max; /* qdBm */ + bool tx_pwr_state; + + if (nphy->tx_pwr_last_recalc_freq == channel->center_freq && + 
nphy->tx_pwr_last_recalc_limit == phy->desired_txpower) + return B43_TXPWR_RES_DONE; + + /* Make sure we have a clean PPR */ + b43_ppr_clear(dev, ppr); + + /* HW limitations */ + b43_ppr_load_max_from_sprom(dev, ppr, B43_BAND_2G); + + /* Regulatory & user settings */ + max = INT_TO_Q52(phy->chandef->chan->max_power); + if (phy->desired_txpower) + max = min_t(u8, max, INT_TO_Q52(phy->desired_txpower)); + b43_ppr_apply_max(dev, ppr, max); + if (b43_debug(dev, B43_DBG_XMITPOWER)) + b43dbg(dev->wl, "Calculated TX power: " Q52_FMT "\n", + Q52_ARG(b43_ppr_get_max(dev, ppr))); + + /* TODO: Enable this once we get gains working */ +#if 0 + /* Some extra gains */ + hw_gain = 6; /* N-PHY specific */ + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + hw_gain += sprom->antenna_gain.a0; + else + hw_gain += sprom->antenna_gain.a1; + b43_ppr_add(dev, ppr, -hw_gain); +#endif + + /* Make sure we didn't go too low */ + b43_ppr_apply_min(dev, ppr, INT_TO_Q52(8)); + + /* Apply */ + tx_pwr_state = nphy->txpwrctrl; + b43_mac_suspend(dev); + b43_nphy_tx_power_ctl_setup(dev); + if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) { + b43_maskset32(dev, B43_MMIO_MACCTL, ~0, B43_MACCTL_PHY_LOCK); + b43_read32(dev, B43_MMIO_MACCTL); + udelay(1); + } + b43_nphy_tx_power_ctrl(dev, nphy->txpwrctrl); + if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) + b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PHY_LOCK, 0); + b43_mac_enable(dev); + + nphy->tx_pwr_last_recalc_freq = channel->center_freq; + nphy->tx_pwr_last_recalc_limit = phy->desired_txpower; + + return B43_TXPWR_RES_DONE; +} + /************************************************** * N-PHY init **************************************************/ @@ -6407,6 +6482,7 @@ static int b43_nphy_op_allocate(struct b43_wldev *dev) nphy = kzalloc(sizeof(*nphy), GFP_KERNEL); if (!nphy) return -ENOMEM; + dev->phy.n = nphy; return 0; @@ -6497,26 +6573,13 @@ static inline void check_phyreg(struct b43_wldev *dev, u16 offset) #endif /* B43_DEBUG */ } -static u16 b43_nphy_op_read(struct b43_wldev *dev, u16 reg) -{ - check_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - return b43_read16(dev, B43_MMIO_PHY_DATA); -} - -static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) -{ - check_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); - b43_write16(dev, B43_MMIO_PHY_DATA, value); -} - static void b43_nphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask, u16 set) { check_phyreg(dev, reg); - b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); + b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg); b43_maskset16(dev, B43_MMIO_PHY_DATA, mask, set); + dev->phy.writes_counter = 1; } static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg) @@ -6529,7 +6592,7 @@ static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg) else reg |= 0x100; - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } @@ -6538,7 +6601,7 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) /* Register 1 is a 32-bit register. 
*/ B43_WARN_ON(dev->phy.rev < 7 && reg == 1); - b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); + b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } @@ -6652,8 +6715,6 @@ const struct b43_phy_operations b43_phyops_n = { .free = b43_nphy_op_free, .prepare_structs = b43_nphy_op_prepare_structs, .init = b43_nphy_op_init, - .phy_read = b43_nphy_op_read, - .phy_write = b43_nphy_op_write, .phy_maskset = b43_nphy_op_maskset, .radio_read = b43_nphy_op_radio_read, .radio_write = b43_nphy_op_radio_write, @@ -6662,5 +6723,4 @@ const struct b43_phy_operations b43_phyops_n = { .switch_channel = b43_nphy_op_switch_channel, .get_default_chan = b43_nphy_op_get_default_chan, .recalc_txpower = b43_nphy_op_recalc_txpower, - .adjust_txpower = b43_nphy_op_adjust_txpower, }; diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h index 30bec815b969..a6da2c31a99c 100644 --- a/drivers/net/wireless/b43/phy_n.h +++ b/drivers/net/wireless/b43/phy_n.h @@ -2,6 +2,7 @@ #define B43_NPHY_H_ #include "phy_common.h" +#include "ppr.h" /* N-PHY registers. */ @@ -967,6 +968,9 @@ struct b43_phy_n { struct b43_phy_n_txpwrindex txpwrindex[2]; struct b43_phy_n_pwr_ctl_info pwr_ctl_info[2]; struct b43_chanspec txiqlocal_chanspec; + struct b43_ppr tx_pwr_max_ppr; + u16 tx_pwr_last_recalc_freq; + int tx_pwr_last_recalc_limit; u8 txrx_chain; u16 tx_rx_cal_phy_saveregs[11]; diff --git a/drivers/net/wireless/b43/ppr.c b/drivers/net/wireless/b43/ppr.c new file mode 100644 index 000000000000..9a770279c415 --- /dev/null +++ b/drivers/net/wireless/b43/ppr.c @@ -0,0 +1,199 @@ +/* + * Broadcom B43 wireless driver + * PPR (Power Per Rate) management + * + * Copyright (c) 2014 Rafał Miłecki <zajec5@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
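
b43_nphy_op_recalc_txpower() above works in quarter-dBm throughout and prints via Q52_FMT/Q52_ARG. The macro bodies are not part of this diff; the following reconstruction of the Q5.2 helpers is an assumption that mirrors only the names used in the hunk:

    #include <stdio.h>

    /* assumed definitions -- the hunk shows only the names in use */
    #define INT_TO_Q52(x)   ((x) << 2)                  /* dBm -> quarter-dBm */
    #define Q52_FMT         "%d.%02d"
    #define Q52_ARG(x)      ((x) >> 2), ((x) & 3) * 25  /* integer, fraction */

    int main(void)
    {
            int limit = INT_TO_Q52(17) + 2;     /* 17.50 dBm in Q5.2 */

            printf("Calculated TX power: " Q52_FMT " dBm\n", Q52_ARG(limit));
            return 0;
    }
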
+ */ + +#include "ppr.h" +#include "b43.h" + +#define ppr_for_each_entry(ppr, i, entry) \ + for (i = 0, entry = &(ppr)->__all_rates[i]; \ + i < B43_PPR_RATES_NUM; \ + i++, entry++) + +void b43_ppr_clear(struct b43_wldev *dev, struct b43_ppr *ppr) +{ + memset(ppr, 0, sizeof(*ppr)); + + /* Compile-time PPR check */ + BUILD_BUG_ON(sizeof(struct b43_ppr) != B43_PPR_RATES_NUM * sizeof(u8)); +} + +void b43_ppr_add(struct b43_wldev *dev, struct b43_ppr *ppr, int diff) +{ + int i; + u8 *rate; + + ppr_for_each_entry(ppr, i, rate) { + *rate = clamp_val(*rate + diff, 0, 127); + } +} + +void b43_ppr_apply_max(struct b43_wldev *dev, struct b43_ppr *ppr, u8 max) +{ + int i; + u8 *rate; + + ppr_for_each_entry(ppr, i, rate) { + *rate = min(*rate, max); + } +} + +void b43_ppr_apply_min(struct b43_wldev *dev, struct b43_ppr *ppr, u8 min) +{ + int i; + u8 *rate; + + ppr_for_each_entry(ppr, i, rate) { + *rate = max(*rate, min); + } +} + +u8 b43_ppr_get_max(struct b43_wldev *dev, struct b43_ppr *ppr) +{ + u8 res = 0; + int i; + u8 *rate; + + ppr_for_each_entry(ppr, i, rate) { + res = max(*rate, res); + } + + return res; +} + +bool b43_ppr_load_max_from_sprom(struct b43_wldev *dev, struct b43_ppr *ppr, + enum b43_band band) +{ + struct b43_ppr_rates *rates = &ppr->rates; + struct ssb_sprom *sprom = dev->dev->bus_sprom; + struct b43_phy *phy = &dev->phy; + u8 maxpwr, off; + u32 sprom_ofdm_po; + u16 *sprom_mcs_po; + u8 extra_cdd_po, extra_stbc_po; + int i; + + switch (band) { + case B43_BAND_2G: + maxpwr = min(sprom->core_pwr_info[0].maxpwr_2g, + sprom->core_pwr_info[1].maxpwr_2g); + sprom_ofdm_po = sprom->ofdm2gpo; + sprom_mcs_po = sprom->mcs2gpo; + extra_cdd_po = (sprom->cddpo >> 0) & 0xf; + extra_stbc_po = (sprom->stbcpo >> 0) & 0xf; + break; + case B43_BAND_5G_LO: + maxpwr = min(sprom->core_pwr_info[0].maxpwr_5gl, + sprom->core_pwr_info[1].maxpwr_5gl); + sprom_ofdm_po = sprom->ofdm5glpo; + sprom_mcs_po = sprom->mcs5glpo; + extra_cdd_po = (sprom->cddpo >> 8) & 0xf; + extra_stbc_po = (sprom->stbcpo >> 8) & 0xf; + break; + case B43_BAND_5G_MI: + maxpwr = min(sprom->core_pwr_info[0].maxpwr_5g, + sprom->core_pwr_info[1].maxpwr_5g); + sprom_ofdm_po = sprom->ofdm5gpo; + sprom_mcs_po = sprom->mcs5gpo; + extra_cdd_po = (sprom->cddpo >> 4) & 0xf; + extra_stbc_po = (sprom->stbcpo >> 4) & 0xf; + break; + case B43_BAND_5G_HI: + maxpwr = min(sprom->core_pwr_info[0].maxpwr_5gh, + sprom->core_pwr_info[1].maxpwr_5gh); + sprom_ofdm_po = sprom->ofdm5ghpo; + sprom_mcs_po = sprom->mcs5ghpo; + extra_cdd_po = (sprom->cddpo >> 12) & 0xf; + extra_stbc_po = (sprom->stbcpo >> 12) & 0xf; + break; + default: + WARN_ON_ONCE(1); + return false; + } + + if (band == B43_BAND_2G) { + for (i = 0; i < 4; i++) { + off = ((sprom->cck2gpo >> (i * 4)) & 0xf) * 2; + rates->cck[i] = maxpwr - off; + } + } + + /* OFDM */ + for (i = 0; i < 8; i++) { + off = ((sprom_ofdm_po >> (i * 4)) & 0xf) * 2; + rates->ofdm[i] = maxpwr - off; + } + + /* MCS 20 SISO */ + rates->mcs_20[0] = rates->ofdm[0]; + rates->mcs_20[1] = rates->ofdm[2]; + rates->mcs_20[2] = rates->ofdm[3]; + rates->mcs_20[3] = rates->ofdm[4]; + rates->mcs_20[4] = rates->ofdm[5]; + rates->mcs_20[5] = rates->ofdm[6]; + rates->mcs_20[6] = rates->ofdm[7]; + rates->mcs_20[7] = rates->ofdm[7]; + + /* MCS 20 CDD */ + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[0] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_cdd[i] = maxpwr - off; + if (phy->type == B43_PHYTYPE_N && phy->rev >= 3) + rates->mcs_20_cdd[i] -= extra_cdd_po; + } + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[1] >> (i * 4)) & 0xf) * 2; + 
rates->mcs_20_cdd[4 + i] = maxpwr - off; + if (phy->type == B43_PHYTYPE_N && phy->rev >= 3) + rates->mcs_20_cdd[4 + i] -= extra_cdd_po; + } + + /* OFDM 20 CDD */ + rates->ofdm_20_cdd[0] = rates->mcs_20_cdd[0]; + rates->ofdm_20_cdd[1] = rates->mcs_20_cdd[0]; + rates->ofdm_20_cdd[2] = rates->mcs_20_cdd[1]; + rates->ofdm_20_cdd[3] = rates->mcs_20_cdd[2]; + rates->ofdm_20_cdd[4] = rates->mcs_20_cdd[3]; + rates->ofdm_20_cdd[5] = rates->mcs_20_cdd[4]; + rates->ofdm_20_cdd[6] = rates->mcs_20_cdd[5]; + rates->ofdm_20_cdd[7] = rates->mcs_20_cdd[6]; + + /* MCS 20 STBC */ + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[0] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_stbc[i] = maxpwr - off; + if (phy->type == B43_PHYTYPE_N && phy->rev >= 3) + rates->mcs_20_stbc[i] -= extra_stbc_po; + } + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[1] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_stbc[4 + i] = maxpwr - off; + if (phy->type == B43_PHYTYPE_N && phy->rev >= 3) + rates->mcs_20_stbc[4 + i] -= extra_stbc_po; + } + + /* MCS 20 SDM */ + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[2] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_sdm[i] = maxpwr - off; + } + for (i = 0; i < 4; i++) { + off = ((sprom_mcs_po[3] >> (i * 4)) & 0xf) * 2; + rates->mcs_20_sdm[4 + i] = maxpwr - off; + } + + return true; +} diff --git a/drivers/net/wireless/b43/ppr.h b/drivers/net/wireless/b43/ppr.h new file mode 100644 index 000000000000..24d7447e9f01 --- /dev/null +++ b/drivers/net/wireless/b43/ppr.h @@ -0,0 +1,45 @@ +#ifndef LINUX_B43_PPR_H_ +#define LINUX_B43_PPR_H_ + +#include <linux/types.h> + +#define B43_PPR_CCK_RATES_NUM 4 +#define B43_PPR_OFDM_RATES_NUM 8 +#define B43_PPR_MCS_RATES_NUM 8 + +#define B43_PPR_RATES_NUM (B43_PPR_CCK_RATES_NUM + \ + B43_PPR_OFDM_RATES_NUM * 2 + \ + B43_PPR_MCS_RATES_NUM * 4) + +struct b43_ppr_rates { + u8 cck[B43_PPR_CCK_RATES_NUM]; + u8 ofdm[B43_PPR_OFDM_RATES_NUM]; + u8 ofdm_20_cdd[B43_PPR_OFDM_RATES_NUM]; + u8 mcs_20[B43_PPR_MCS_RATES_NUM]; /* SISO */ + u8 mcs_20_cdd[B43_PPR_MCS_RATES_NUM]; + u8 mcs_20_stbc[B43_PPR_MCS_RATES_NUM]; + u8 mcs_20_sdm[B43_PPR_MCS_RATES_NUM]; +}; + +struct b43_ppr { + /* All powers are in qdbm (Q5.2) */ + union { + u8 __all_rates[B43_PPR_RATES_NUM]; + struct b43_ppr_rates rates; + }; +}; + +struct b43_wldev; +enum b43_band; + +void b43_ppr_clear(struct b43_wldev *dev, struct b43_ppr *ppr); + +void b43_ppr_add(struct b43_wldev *dev, struct b43_ppr *ppr, int diff); +void b43_ppr_apply_max(struct b43_wldev *dev, struct b43_ppr *ppr, u8 max); +void b43_ppr_apply_min(struct b43_wldev *dev, struct b43_ppr *ppr, u8 min); +u8 b43_ppr_get_max(struct b43_wldev *dev, struct b43_ppr *ppr); + +bool b43_ppr_load_max_from_sprom(struct b43_wldev *dev, struct b43_ppr *ppr, + enum b43_band band); + +#endif /* LINUX_B43_PPR_H_ */ diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c index 4b5885077b01..25d1cbd34306 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -2878,6 +2878,40 @@ const s8 b43_ntab_papd_pga_gain_delta_ipa_2g[] = { -54, -46, -39, -31, -23, -15, -8, 0 }; +/* Extracted from MMIO dump of 6.30.223.248 + * Entries: 0, 15, 17, 21, 24, 26, 27, 29, 30 were guessed + */ +static const s16 b43_ntab_rf_pwr_offset_2057_rev9_2g[] = { + -133, -133, -107, -92, -81, + -73, -66, -61, -56, -52, + -48, -44, -41, -37, -34, + -31, -28, -25, -22, -19, + -17, -14, -12, -10, -9, + -7, -5, -4, -3, -2, + -1, 0, +}; + +/* Extracted from MMIO dump of 6.30.223.248 */ +static const s16 
b43_ntab_rf_pwr_offset_2057_rev9_5g[] = { + -101, -94, -86, -79, -72, + -65, -57, -50, -42, -35, + -28, -21, -16, -9, -4, + 0, +}; + +/* Extracted from MMIO dump of 6.30.223.248 + * Entries: 0, 26, 28, 29, 30, 31 were guessed + */ +static const s16 b43_ntab_rf_pwr_offset_2057_rev14_2g[] = { + -111, -111, -111, -84, -70, + -59, -52, -45, -40, -36, + -32, -29, -26, -23, -21, + -18, -16, -15, -13, -11, + -10, -8, -7, -6, -5, + -4, -4, -3, -3, -2, + -2, -1, +}; + const u16 tbl_iqcal_gainparams[2][9][8] = { { { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 }, @@ -3197,7 +3231,7 @@ static struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = { { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 0x527E, /* invalid for external LNA! */ { 0x513F, 0x513F, 0x513F, 0x513F }, /* invalid for external LNA! */ - 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */ + 0x007E, 0x0066, 0x0000, /* low is invalid (the last one) */ 0x18, 0x18, 0x18, 0x01D0, 0x5, }, @@ -3708,9 +3742,43 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev) } } +const s16 *b43_ntab_get_rf_pwr_offset_table(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { + switch (phy->rev) { + case 17: + if (phy->radio_rev == 14) + return b43_ntab_rf_pwr_offset_2057_rev14_2g; + break; + case 16: + if (phy->radio_rev == 9) + return b43_ntab_rf_pwr_offset_2057_rev9_2g; + break; + } + + b43err(dev->wl, + "No 2GHz RF power table available for this device\n"); + return NULL; + } else { + switch (phy->rev) { + case 16: + if (phy->radio_rev == 9) + return b43_ntab_rf_pwr_offset_2057_rev9_5g; + break; + } + + b43err(dev->wl, + "No 5GHz RF power table available for this device\n"); + return NULL; + } +} + struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( struct b43_wldev *dev, bool ghz5, bool ext_lna) { + struct b43_phy *phy = &dev->phy; struct nphy_gain_ctl_workaround_entry *e; u8 phy_idx; @@ -3729,37 +3797,49 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( e = &nphy_gain_ctl_workaround[ghz5][phy_idx]; /* Some workarounds to the workarounds... 
*/ - if (ghz5 && dev->phy.rev >= 6) { - if (dev->phy.radio_rev == 11 && - !b43_is_40mhz(dev)) - e->cliplo_gain = 0x2d; - } else if (!ghz5 && dev->phy.rev >= 5) { - static const int gain_data[] = {0x0062, 0x0064, 0x006a, 0x106a, - 0x106c, 0x1074, 0x107c, 0x207c}; + if (!ghz5) { u8 tr_iso = dev->dev->bus_sprom->fem.ghz2.tr_iso; - if (ext_lna) { + if (tr_iso > 7) + tr_iso = 3; + + if (phy->rev >= 6) { + static const int gain_data[] = { 0x106a, 0x106c, 0x1074, + 0x107c, 0x007e, 0x107e, + 0x207e, 0x307e, }; + + e->cliplo_gain = gain_data[tr_iso]; + } else if (phy->rev == 5) { + static const int gain_data[] = { 0x0062, 0x0064, 0x006a, + 0x106a, 0x106c, 0x1074, + 0x107c, 0x207c, }; + + e->cliplo_gain = gain_data[tr_iso]; + } + + if (phy->rev >= 5 && ext_lna) { e->rfseq_init[0] &= ~0x4000; e->rfseq_init[1] &= ~0x4000; e->rfseq_init[2] &= ~0x4000; e->rfseq_init[3] &= ~0x4000; e->init_gain &= ~0x4000; } - if (tr_iso > 7) - tr_iso = 3; - e->cliplo_gain = gain_data[tr_iso]; - - } else if (ghz5 && dev->phy.rev == 4 && ext_lna) { - e->rfseq_init[0] &= ~0x4000; - e->rfseq_init[1] &= ~0x4000; - e->rfseq_init[2] &= ~0x4000; - e->rfseq_init[3] &= ~0x4000; - e->init_gain &= ~0x4000; - e->rfseq_init[0] |= 0x1000; - e->rfseq_init[1] |= 0x1000; - e->rfseq_init[2] |= 0x1000; - e->rfseq_init[3] |= 0x1000; - e->init_gain |= 0x1000; + } else { + if (phy->rev >= 6) { + if (phy->radio_rev == 11 && !b43_is_40mhz(dev)) + e->crsminu = 0x2d; + } else if (phy->rev == 4 && ext_lna) { + e->rfseq_init[0] &= ~0x4000; + e->rfseq_init[1] &= ~0x4000; + e->rfseq_init[2] &= ~0x4000; + e->rfseq_init[3] &= ~0x4000; + e->init_gain &= ~0x4000; + e->rfseq_init[0] |= 0x1000; + e->rfseq_init[1] |= 0x1000; + e->rfseq_init[2] |= 0x1000; + e->rfseq_init[3] |= 0x1000; + e->init_gain |= 0x1000; + } } return e; diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h index 3ce2e6f3a278..b51f386db02f 100644 --- a/drivers/net/wireless/b43/tables_nphy.h +++ b/drivers/net/wireless/b43/tables_nphy.h @@ -191,6 +191,8 @@ void b43_nphy_tables_init(struct b43_wldev *dev); const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev); +const s16 *b43_ntab_get_rf_pwr_offset_table(struct b43_wldev *dev); + extern const s8 b43_ntab_papd_pga_gain_delta_ipa_2g[]; extern const u16 tbl_iqcal_gainparams[2][9][8]; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index 057b982ea8b3..1d78a91db594 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -1431,8 +1431,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, IEEE80211_BAND_5GHZ); wdev = &ifp->vif->wdev; - cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0, - GFP_ATOMIC); + cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0); kfree(mgmt_frame); return 0; @@ -1896,8 +1895,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ); - cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, - GFP_ATOMIC); + cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0); brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n", mgmt_frame_len, e->datalen, chanspec, freq); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index f3a9804988a6..54fc1a1cb4f7 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ 
b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -2397,9 +2397,13 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval); brcmf_dbg(CONN, "Signal: %d\n", notify_signal); - bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID, - 0, notify_capability, notify_interval, notify_ie, - notify_ielen, notify_signal, GFP_KERNEL); + bss = cfg80211_inform_bss(wiphy, notify_channel, + CFG80211_BSS_FTYPE_UNKNOWN, + (const u8 *)bi->BSSID, + 0, notify_capability, + notify_interval, notify_ie, + notify_ielen, notify_signal, + GFP_KERNEL); if (!bss) return -ENOMEM; @@ -2501,9 +2505,11 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg, brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval); brcmf_dbg(CONN, "signal: %d\n", notify_signal); - bss = cfg80211_inform_bss(wiphy, notify_channel, bssid, - 0, notify_capability, notify_interval, - notify_ie, notify_ielen, notify_signal, GFP_KERNEL); + bss = cfg80211_inform_bss(wiphy, notify_channel, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, 0, + notify_capability, notify_interval, + notify_ie, notify_ielen, notify_signal, + GFP_KERNEL); if (!bss) { err = -ENOMEM; diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c index 40078f5f932e..964b64ab7fe3 100644 --- a/drivers/net/wireless/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/cw1200/cw1200_spi.c @@ -398,7 +398,7 @@ static int cw1200_spi_probe(struct spi_device *func) return -1; } - self = kzalloc(sizeof(*self), GFP_KERNEL); + self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL); if (!self) { pr_err("Can't allocate SPI hwbus_priv."); return -ENOMEM; @@ -424,7 +424,6 @@ static int cw1200_spi_probe(struct spi_device *func) if (status) { cw1200_spi_irq_unsubscribe(self); cw1200_spi_off(plat_data); - kfree(self); } return status; @@ -441,7 +440,6 @@ static int cw1200_spi_disconnect(struct spi_device *func) cw1200_core_release(self->core); self->core = NULL; } - kfree(self); } cw1200_spi_off(dev_get_platdata(&func->dev)); diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index a42f9c335090..f0c3c77a48d3 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -5552,7 +5552,7 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, min(network->ssid_len, priv->essid_len)))) { char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; - strncpy(escaped, + strlcpy(escaped, print_ssid(ssid, network->ssid, network->ssid_len), sizeof(escaped)); @@ -5765,7 +5765,7 @@ static int ipw_best_network(struct ipw_priv *priv, memcmp(network->ssid, priv->essid, min(network->ssid_len, priv->essid_len)))) { char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; - strncpy(escaped, + strlcpy(escaped, print_ssid(ssid, network->ssid, network->ssid_len), sizeof(escaped)); @@ -5782,7 +5782,7 @@ static int ipw_best_network(struct ipw_priv *priv, * testing everything else. 
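
The cw1200_spi hunk above swaps kzalloc() for devm_kzalloc(), tying the buffer's lifetime to the struct device so both the probe error path and disconnect lose their kfree(). A single-slot stand-in for the devres idea (the real implementation keeps a list per device):

    #include <stdlib.h>

    struct device {
            void *devres;   /* real devres is a list; one slot suffices here */
    };

    /* devm-style helper: the allocation is remembered on the device */
    static void *devm_zalloc(struct device *dev, size_t sz)
    {
            dev->devres = calloc(1, sz);
            return dev->devres;
    }

    static void device_detach(struct device *dev)
    {
            free(dev->devres);      /* released by the core, not the driver */
            dev->devres = NULL;
    }
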
*/ if (match->network && match->network->stats.rssi > network->stats.rssi) { char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; - strncpy(escaped, + strlcpy(escaped, print_ssid(ssid, network->ssid, network->ssid_len), sizeof(escaped)); IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because " diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 824f5e287783..267e48a2915e 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -85,6 +85,16 @@ config IWLWIFI_BCAST_FILTERING If unsure, don't enable this option, as some programs might expect incoming broadcasts for their normal operations. +config IWLWIFI_UAPSD + bool "enable U-APSD by default" + depends on IWLMVM + help + Say Y here to enable U-APSD by default. This may cause + interoperability problems with some APs, manifesting in lower than + expected throughput due to those APs not enabling aggregation. + + If unsure, say N. + menu "Debugging Options" config IWLWIFI_DEBUG diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 3255a1723d17..d1ce3ce13591 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -580,7 +580,7 @@ turn_off: * time, or we hadn't time to drain the AC queues. */ if (agg_state == IWL_AGG_ON) - iwl_trans_txq_disable(priv->trans, txq_id); + iwl_trans_txq_disable(priv->trans, txq_id, true); else IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", agg_state); @@ -686,7 +686,7 @@ int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif, * time, or we hadn't time to drain the AC queues. */ if (agg_state == IWL_AGG_ON) - iwl_trans_txq_disable(priv->trans, txq_id); + iwl_trans_txq_disable(priv->trans, txq_id, true); else IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n", agg_state); @@ -781,7 +781,7 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid) "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); iwl_trans_txq_disable(priv->trans, - tid_data->agg.txq_id); + tid_data->agg.txq_id, true); iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id); tid_data->agg.state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index d53adc245497..8e99dffa88e8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index e93c6972290b..23a67bfc086f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index fe129c94ae3e..23d059af6476 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index 295083510e72..0a70bcd241f5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -145,6 +145,7 @@ do { \ #define IWL_DL_HCMD 0x00000004 #define IWL_DL_STATE 0x00000008 /* 0x000000F0 - 0x00000010 */ +#define IWL_DL_QUOTA 0x00000010 #define IWL_DL_TE 0x00000020 #define IWL_DL_EEPROM 0x00000040 #define IWL_DL_RADIO 0x00000080 @@ -189,6 +190,7 @@ do { \ #define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) #define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) #define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) +#define IWL_DEBUG_QUOTA(p, f, a...) IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a) #define IWL_DEBUG_TE(p, f, a...) IWL_DEBUG(p, IWL_DL_TE, f, ## a) #define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a) #define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c index 23e7351e02de..90987d6f348e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c @@ -36,15 +36,8 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_info); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_warn); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_crit); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_err); -EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dbg); #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c index 77e3178040b2..aefd94cb6e91 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1254,7 +1256,9 @@ struct iwl_mod_params iwlwifi_mod_params = { .bt_coex_active = true, .power_level = IWL_POWER_INDEX_1, .wd_disable = true, - .uapsd_disable = false, +#ifndef CONFIG_IWLWIFI_UAPSD + .uapsd_disable = true, +#endif /* CONFIG_IWLWIFI_UAPSD */ /* the rest are 0 by default */ }; IWL_EXPORT_SYMBOL(iwlwifi_mod_params); @@ -1370,7 +1374,11 @@ MODULE_PARM_DESC(nvm_file, "NVM file name"); module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, bool, S_IRUGO); +#ifdef CONFIG_IWLWIFI_UAPSD MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)"); +#else +MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)"); +#endif /* * set bt_coex_active to true, uCode will do kill/defer diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h index 3c72cb710b0c..be4f8972241a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-drv.h +++ b/drivers/net/wireless/iwlwifi/iwl-drv.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h index de5994a776c7..e30a41d04c8b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index 929a8063354c..401f7be36b93 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h index 1bb5193c5b1b..f68cba4e0444 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -125,6 +127,8 @@ enum iwl_ucode_tlv_flag { * @IWL_UCODE_TLV_API_CSA_FLOW: ucode can do unbind-bind flow for CSA. * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit. * @IWL_UCODE_TLV_API_LMAC_SCAN: This ucode uses LMAC unified scan API. + * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time + * longer than the passive one, which is essential for fragmented scan. */ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), @@ -133,6 +137,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_CSA_FLOW = BIT(4), IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5), IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), + IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), }; /** diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 354255f08754..40718f814f8d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h index 99785c892f96..b6d666ee8359 100644 --- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 47033a35a402..1560f4576c7d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -281,6 +283,7 @@ #define SCD_CHAINEXT_EN (SCD_BASE + 0x244) #define SCD_AGGR_SEL (SCD_BASE + 0x248) #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) +#define SCD_EN_CTRL (SCD_BASE + 0x254) static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl) { diff --git a/drivers/net/wireless/iwlwifi/iwl-scd.h b/drivers/net/wireless/iwlwifi/iwl-scd.h new file mode 100644 index 000000000000..6c622b21bba7 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-scd.h @@ -0,0 +1,118 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_scd_h__ +#define __iwl_scd_h__ + +#include "iwl-trans.h" +#include "iwl-io.h" +#include "iwl-prph.h" + + +static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); +} + +static inline void iwl_scd_txq_enable_agg(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); +} + +static inline void iwl_scd_txq_disable_agg(struct iwl_trans *trans, + u16 txq_id) +{ + iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); +} + +static inline void iwl_scd_disable_agg(struct iwl_trans *trans) +{ + iwl_set_bits_prph(trans, SCD_AGGR_SEL, 0); +} + +static inline void iwl_scd_activate_fifos(struct iwl_trans *trans) +{ + iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7)); +} + +static inline void iwl_scd_deactivate_fifos(struct iwl_trans *trans) +{ + iwl_write_prph(trans, SCD_TXFACT, 0); +} + +static inline void iwl_scd_enable_set_active(struct iwl_trans *trans, + u32 value) +{ + iwl_write_prph(trans, SCD_EN_CTRL, value); +} +#endif diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h index 656371a668da..c89985a58803 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -401,6 +403,14 @@ struct iwl_trans_dump_data { struct iwl_trans; +struct iwl_trans_txq_scd_cfg { + u8 fifo; + s8 sta_id; + u8 tid; + bool aggregate; + int frame_limit; +}; + /** * struct iwl_trans_ops - transport specific operations * @@ -437,7 +447,9 @@ struct iwl_trans; * Must be atomic * @txq_enable: setup a queue. To setup an AC queue, use the * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before - * this one. The op_mode must not configure the HCMD queue. May sleep. + * this one. The op_mode must not configure the HCMD queue. The scheduler + * configuration may be %NULL, in which case the hardware will not be + * configured. May sleep. * @txq_disable: de-configure a Tx queue to send AMPDUs * Must be atomic * @wait_tx_queue_empty: wait until tx queues are empty. May sleep. 
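The hunks below mechanically convert every caller: txq_enable() loses its positional fifo/sta_id/tid/frame_limit arguments in favour of a struct iwl_trans_txq_scd_cfg pointer (NULL meaning "leave the scheduler alone"), and txq_disable() gains a configure_scd flag. A minimal sketch of the new calling convention follows; iwl_example_queue_cycle() and the queue number are purely illustrative and not part of the patch, while iwl_trans_txq_scd_cfg, iwl_trans_txq_enable_cfg(), iwl_trans_txq_disable(), IWL_MAX_TID_COUNT and IWL_FRAME_LIMIT are names taken from this series.

/* Illustrative only: bring up queue 10 with full scheduler (SCD)
 * configuration, then tear it down again. Passing a NULL cfg instead
 * would leave the scheduler untouched, e.g. when the firmware owns
 * the SCD configuration.
 */
static void iwl_example_queue_cycle(struct iwl_trans *trans)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = 1,			/* BE fifo, as an example */
		.sta_id = -1,			/* not bound to a station */
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, 10, 0 /* ssn */, &cfg);

	/* configure_scd = true: also deconfigure the scheduler */
	iwl_trans_txq_disable(trans, 10, true);
}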
@@ -492,9 +504,10 @@ struct iwl_trans_ops { void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs); - void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo, - int sta_id, int tid, int frame_limit, u16 ssn); - void (*txq_disable)(struct iwl_trans *trans, int queue); + void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg); + void (*txq_disable)(struct iwl_trans *trans, int queue, + bool configure_scd); int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); @@ -766,29 +779,57 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, trans->ops->reclaim(trans, queue, ssn, skbs); } -static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue) +static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd) { - trans->ops->txq_disable(trans, queue); + trans->ops->txq_disable(trans, queue, configure_scd); } -static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, - int fifo, int sta_id, int tid, - int frame_limit, u16 ssn) +static inline void +iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg) { might_sleep(); if (unlikely((trans->state != IWL_TRANS_FW_ALIVE))) IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - trans->ops->txq_enable(trans, queue, fifo, sta_id, tid, - frame_limit, ssn); + trans->ops->txq_enable(trans, queue, ssn, cfg); +} + +static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, + int fifo, int sta_id, int tid, + int frame_limit, u16 ssn) +{ + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .sta_id = sta_id, + .tid = tid, + .frame_limit = frame_limit, + .aggregate = sta_id >= 0, + }; + + iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg); } static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo) { - iwl_trans_txq_enable(trans, queue, fifo, -1, - IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0); + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = fifo, + .sta_id = -1, + .tid = IWL_MAX_TID_COUNT, + .frame_limit = IWL_FRAME_LIMIT, + .aggregate = false, + }; + + iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg); +} + +static inline void +iwl_trans_txq_enable_no_scd(struct iwl_trans *trans, int queue, u16 ssn) +{ + iwl_trans_txq_enable_cfg(trans, queue, ssn, NULL); } static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans, diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index ce71625f497f..6e8f3e2aef74 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c index a3be33359927..585c0ab4a3ec 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h index ca79f7160573..dd00e8f7f765 100644 --- a/drivers/net/wireless/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/iwlwifi/mvm/constants.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 645b3cfc29a5..c17be0fb7283 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -700,7 +702,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return ret; rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false); + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (ret) return ret; diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c index 87e517bffedc..9aa2311a776c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -118,6 +120,10 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val); dbgfs_pm->uapsd_misbehaving = val; break; + case MVM_DEBUGFS_PM_USE_PS_POLL: + IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val); + dbgfs_pm->use_ps_poll = val; + break; } } @@ -168,6 +174,10 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf, if (sscanf(buf + 18, "%d", &val) != 1) return -EINVAL; param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING; + } else if (!strncmp("use_ps_poll=", buf, 12)) { + if (sscanf(buf + 12, "%d", &val) != 1) + return -EINVAL; + param = MVM_DEBUGFS_PM_USE_PS_POLL; } else { return -EINVAL; } diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c index 7d18f466fbb3..d98ee109c5e9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -257,6 +259,70 @@ static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, return count; } +static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos; + + if (!mvm->temperature_test) + pos = scnprintf(buf, sizeof(buf), "disabled\n"); + else + pos = scnprintf(buf, sizeof(buf), "%d\n", mvm->temperature); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +/* + * Set NIC Temperature + * Cause the driver to ignore the actual NIC temperature reported by the FW + * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN and + * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX + * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE + */ +static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + int temperature; + + if (kstrtoint(buf, 10, &temperature)) + return -EINVAL; + /* not a legal temperature */ + if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX && + temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) || + temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN) + return -EINVAL; + + mutex_lock(&mvm->mutex); + if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) { + if (!mvm->temperature_test) + goto out; + + mvm->temperature_test = false; + /* Since we can't read the temp while awake, just set + * it to zero until we get the next RX stats from the + * firmware. + */ + mvm->temperature = 0; + } else { + mvm->temperature_test = true; + mvm->temperature = temperature; + } + IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n", + mvm->temperature_test ? 
"En" : "Dis", + mvm->temperature); + /* handle the temperature change */ + iwl_mvm_tt_handler(mvm); + +out: + mutex_unlock(&mvm->mutex); + + return count; +} + static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -1296,6 +1362,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_FILE_OPS(stations); MVM_DEBUGFS_READ_FILE_OPS(bt_notif); MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); @@ -1336,6 +1403,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); + MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, + S_IWUSR | S_IRUSR); MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); @@ -1380,6 +1449,13 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) goto err; #endif + if (!debugfs_create_u8("low_latency_agg_frame_limit", S_IRUSR | S_IWUSR, + mvm->debugfs_dir, + &mvm->low_latency_agg_frame_limit)) + goto err; + if (!debugfs_create_u8("ps_disabled", S_IRUSR, + mvm->debugfs_dir, &mvm->ps_disabled)) + goto err; if (!debugfs_create_blob("nvm_hw", S_IRUSR, mvm->debugfs_dir, &mvm->nvm_hw_blob)) goto err; diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/iwlwifi/mvm/debugfs.h index e3a9774af495..8c4190e7e027 100644 --- a/drivers/net/wireless/iwlwifi/mvm/debugfs.h +++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h index 69875716dcdb..816883f9ff94 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h index 13696fe419b7..e74cdf2132f8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h index c3a8c86b550d..27dd86395b39 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index c02a9e45ec5e..8f2216694004 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h index 47bd0406355d..21dd5b771660 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 9a922f3bd16b..541b844c6b5d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -73,16 +75,20 @@ #include "fw-api-coex.h" #include "fw-api-scan.h" -/* maximal number of Tx queues in any platform */ -#define IWL_MVM_MAX_QUEUES 20 - /* Tx queue numbers */ enum { IWL_MVM_OFFCHANNEL_QUEUE = 8, IWL_MVM_CMD_QUEUE = 9, }; -#define IWL_MVM_CMD_FIFO 7 +enum iwl_mvm_tx_fifo { + IWL_MVM_TX_FIFO_BK = 0, + IWL_MVM_TX_FIFO_BE, + IWL_MVM_TX_FIFO_VI, + IWL_MVM_TX_FIFO_VO, + IWL_MVM_TX_FIFO_MCAST = 5, + IWL_MVM_TX_FIFO_CMD = 7, +}; #define IWL_MVM_STATION_COUNT 16 @@ -184,6 +190,8 @@ enum { REPLY_RX_MPDU_CMD = 0xc1, BA_NOTIF = 0xc5, + MARKER_CMD = 0xcb, + /* BT Coex */ BT_COEX_PRIO_TABLE = 0xcc, BT_COEX_PROT_ENV = 0xcd, @@ -1307,6 +1315,38 @@ struct iwl_bcast_filter_cmd { struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; } __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ +/* + * enum iwl_mvm_marker_id - marker ids + * + * The ids for the different types of markers to insert into the usniffer logs + */ +enum iwl_mvm_marker_id { + MARKER_ID_TX_FRAME_LATENCY = 1, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_mvm_marker - mark info into the usniffer logs + * + * (MARKER_CMD = 0xcb) + * + * Mark the UTC time stamp into the usniffer logs together with additional + * metadata, so the usniffer output can be parsed. + * In the command response the ucode will return the GP2 time. + * + * @dwLen: The number of dwords following this byte, including this byte. + * @markerId: A unique marker id (iwl_mvm_marker_id). + * @reserved: reserved. + * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC + * @metadata: additional metadata that will be written to the usniffer log + */ +struct iwl_mvm_marker { + u8 dwLen; + u8 markerId; + __le16 reserved; + __le64 timestamp; + __le32 metadata[0]; +} __packed; /* MARKER_API_S_VER_1 */ + struct mvm_statistics_dbg { __le32 burst_check; __le32 burst_count; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 883e702152d5..21d606028ca6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -242,10 +244,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, mvm->queue_to_mac80211[i] = i; else mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE; - atomic_set(&mvm->queue_stop_count[i], 0); } - mvm->transport_queue_stop = 0; + for (i = 0; i < IEEE80211_MAX_QUEUES; i++) + atomic_set(&mvm->mac80211_queue_stop_count[i], 0); mvm->ucode_loaded = true; diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index 8242e689ddb1..158aed501473 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -81,7 +83,7 @@ struct iwl_mvm_mac_iface_iterator_data { struct ieee80211_vif *vif; unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)]; unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)]; - unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_MAX_QUEUES)]; + u32 used_hw_queues; enum iwl_tsf_id preferred_tsf; bool found_vif; }; @@ -192,12 +194,31 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, data->preferred_tsf = NUM_TSF_IDS; } +/* + * Get the mask of the queues used by the vif + */ +u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + u32 qmask = 0, ac; + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + return BIT(IWL_MVM_OFFCHANNEL_QUEUE); + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) + qmask |= BIT(vif->hw_queue[ac]); + + if (vif->type == NL80211_IFTYPE_AP) + qmask |= BIT(vif->cab_queue); + + return qmask; +} + static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { struct iwl_mvm_mac_iface_iterator_data *data = _data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 ac; /* Iterator may already find the interface being added -- skip it */ if (vif == data->vif) { @@ -206,12 +227,7 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, } /* Mark the queues used by the vif */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) - __set_bit(vif->hw_queue[ac], data->used_hw_queues); - - if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) - __set_bit(vif->cab_queue, data->used_hw_queues); + data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(data->mvm, vif); /* Mark MAC IDs as used by clearing the available bit, and * (below) mark TSFs as used if their existing use is not @@ -225,24 +241,6 @@ static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, iwl_mvm_mac_tsf_id_iter(_data, mac, vif); } -/* - * Get the mask of the queus used by the vif - */ -u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - u32 qmask = 0, ac; - - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) - return BIT(IWL_MVM_OFFCHANNEL_QUEUE); - - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) - qmask |= BIT(vif->hw_queue[ac]); - - 
return qmask; -} - void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -277,15 +275,15 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, .available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 }, /* no preference yet */ .preferred_tsf = NUM_TSF_IDS, - .used_hw_queues = { + .used_hw_queues = BIT(IWL_MVM_OFFCHANNEL_QUEUE) | BIT(mvm->aux_queue) | - BIT(IWL_MVM_CMD_QUEUE) - }, + BIT(IWL_MVM_CMD_QUEUE), .found_vif = false, }; u32 ac; int ret, i; + unsigned long used_hw_queues; /* * Allocate a MAC ID and a TSF for this MAC, along with the queues @@ -368,9 +366,11 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return 0; } + used_hw_queues = data.used_hw_queues; + /* Find available queues, and allocate them to the ACs */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - u8 queue = find_first_zero_bit(data.used_hw_queues, + u8 queue = find_first_zero_bit(&used_hw_queues, mvm->first_agg_queue); if (queue >= mvm->first_agg_queue) { @@ -379,13 +379,13 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, goto exit_fail; } - __set_bit(queue, data.used_hw_queues); + __set_bit(queue, &used_hw_queues); vif->hw_queue[ac] = queue; } /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP) { - u8 queue = find_first_zero_bit(data.used_hw_queues, + u8 queue = find_first_zero_bit(&used_hw_queues, mvm->first_agg_queue); if (queue >= mvm->first_agg_queue) { @@ -452,14 +452,16 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: - iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE); + iwl_trans_txq_disable(mvm->trans, IWL_MVM_OFFCHANNEL_QUEUE, + true); break; case NL80211_IFTYPE_AP: - iwl_trans_txq_disable(mvm->trans, vif->cab_queue); + iwl_trans_txq_disable(mvm->trans, vif->cab_queue, true); /* fall through */ default: for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac]); + iwl_trans_txq_disable(mvm->trans, vif->hw_queue[ac], + true); } } @@ -586,6 +588,7 @@ static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm, static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_ctx_cmd *cmd, + const u8 *bssid_override, u32 action) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -593,6 +596,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, bool ht_enabled = !!(vif->bss_conf.ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); u8 cck_ack_rates, ofdm_ack_rates; + const u8 *bssid = bssid_override ?: vif->bss_conf.bssid; int i; cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, @@ -625,8 +629,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id); memcpy(cmd->node_addr, vif->addr, ETH_ALEN); - if (vif->bss_conf.bssid) - memcpy(cmd->bssid_addr, vif->bss_conf.bssid, ETH_ALEN); + + if (bssid) + memcpy(cmd->bssid_addr, bssid, ETH_ALEN); else eth_broadcast_addr(cmd->bssid_addr); @@ -695,7 +700,8 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 action, bool force_assoc_off) + u32 action, bool force_assoc_off, + const u8 *bssid_override) { struct iwl_mac_ctx_cmd cmd = {}; struct iwl_mac_data_sta *ctxt_sta; @@ -703,7 +709,7 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_STATION); /* 
Fill the common data for all mac context types */ - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action); if (vif->p2p) { struct ieee80211_p2p_noa_attr *noa = @@ -784,7 +790,7 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | @@ -805,7 +811,7 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | MAC_FILTER_IN_PROBE_REQUEST); @@ -844,7 +850,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE); - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); @@ -1072,7 +1078,7 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p); /* Fill the common data for all mac context types */ - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* * pass probe requests and beacons from other APs (needed @@ -1098,7 +1104,7 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p); /* Fill the common data for all mac context types */ - iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); + iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); /* * pass probe requests and beacons from other APs (needed @@ -1121,12 +1127,14 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm, } static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u32 action, bool force_assoc_off) + u32 action, bool force_assoc_off, + const u8 *bssid_override) { switch (vif->type) { case NL80211_IFTYPE_STATION: return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action, - force_assoc_off); + force_assoc_off, + bssid_override); break; case NL80211_IFTYPE_AP: if (!vif->p2p) @@ -1157,7 +1165,7 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return -EIO; ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD, - true); + true, NULL); if (ret) return ret; @@ -1169,7 +1177,7 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool force_assoc_off) + bool force_assoc_off, const u8 *bssid_override) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1178,7 +1186,7 @@ int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EIO; return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY, - force_assoc_off); + force_assoc_off, bssid_override); } int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif) diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index cdc272d776e7..069bb8e81c36 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. 
All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -778,6 +780,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_trans_stop_device(mvm->trans); mvm->scan_status = IWL_MVM_SCAN_NONE; + mvm->ps_disabled = false; /* just in case one was running */ ieee80211_remain_on_channel_expired(mvm->hw); @@ -805,6 +808,9 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) * ucode_down ref until reconfig is complete */ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); + /* clear any stale d0i3 state */ + clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); + mvm->vif_count = 0; mvm->rx_ba_sessions = 0; } @@ -882,7 +888,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) /* async_handlers_list is empty and will stay empty: HW is stopped */ /* the fw is stopped, the aux sta is dead: clean up driver state */ - iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); + iwl_mvm_del_aux_sta(mvm); mutex_unlock(&mvm->mutex); @@ -967,10 +973,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, */ if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) { - u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); - ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, - qmask, - ieee80211_vif_type_p2p(vif)); + ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) { IWL_ERR(mvm, "Failed to allocate bcast sta\n"); goto out_release; @@ -1018,7 +1021,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, if (ret) goto out_unref_phy; - ret = iwl_mvm_add_bcast_sta(mvm, vif, &mvmvif->bcast_sta); + ret = iwl_mvm_add_bcast_sta(mvm, vif); if (ret) goto out_unbind; @@ -1059,14 +1062,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 tfd_msk = 0, ac; - - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) - tfd_msk |= BIT(vif->hw_queue[ac]); - - if (vif->cab_queue != IEEE80211_INVAL_HW_QUEUE) - tfd_msk |= BIT(vif->cab_queue); + u32 tfd_msk = iwl_mvm_mac_get_queues_mask(mvm, vif); if (tfd_msk) { mutex_lock(&mvm->mutex); @@ -1122,13 +1118,13 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, mvm->noa_duration = 0; } #endif - iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_dealloc_bcast_sta(mvm, vif); goto out_release; } if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; - iwl_mvm_rm_bcast_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_rm_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; @@ -1447,10 +1443,23 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false); + /* + * If we're not associated yet, take the (new) BSSID before associating + * so the firmware knows. 
If we're already associated, then use the old + * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC + * branch for disassociation below. + */ + if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) + memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); + + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); if (ret) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); + /* after sending it once, adopt mac80211 data */ + memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); + mvmvif->associated = bss_conf->assoc; + if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { /* add quota for this interface */ @@ -1478,13 +1487,17 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, */ u32 dur = (11 * vif->bss_conf.beacon_int) / 10; iwl_mvm_protect_session(mvm, vif, dur, dur, - 5 * dur); + 5 * dur, false); } iwl_mvm_sf_update(mvm, vif, false); iwl_mvm_power_vif_assoc(mvm, vif); - if (vif->p2p) + if (vif->p2p) { iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); + iwl_mvm_update_smps(mvm, vif, + IWL_MVM_SMPS_REQ_PROT, + IEEE80211_SMPS_DYNAMIC); + } } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { /* * If update fails - SF might be running in associated @@ -1508,6 +1521,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (vif->p2p) iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); + + /* this will take the cleared BSSID from bss_conf */ + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + if (ret) + IWL_ERR(mvm, + "failed to update MAC %pM (clear after unassoc)\n", + vif->addr); } iwl_mvm_recalc_multicast(mvm); @@ -1604,7 +1624,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, /* Send the bcast station. At this stage the TBTT and DTIM time events * are added and applied to the scheduler */ - ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta); + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) goto out_unbind; @@ -1620,7 +1640,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) - iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false); + iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); @@ -1636,7 +1656,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, out_quota_failed: iwl_mvm_power_update_mac(mvm); mvmvif->ap_ibss_active = false; - iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_send_rm_bcast_sta(mvm, vif); out_unbind: iwl_mvm_binding_remove_vif(mvm, vif); out_remove: @@ -1678,10 +1698,10 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ if (vif->p2p && mvm->p2p_device_vif) - iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false); + iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); iwl_mvm_update_quotas(mvm, NULL); - iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta); + iwl_mvm_send_rm_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_power_update_mac(mvm); @@ -1705,7 +1725,7 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | BSS_CHANGED_BANDWIDTH) && - iwl_mvm_mac_ctxt_changed(mvm, vif, false)) + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); /* Need to send a new beacon template to the FW */ @@ -2116,7 +2136,7 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, int 
ret; mutex_lock(&mvm->mutex); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false); + ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); mutex_unlock(&mvm->mutex); return ret; } @@ -2144,7 +2164,7 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); /* Try really hard to protect the session and hear a beacon */ - iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500); + iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); mutex_unlock(&mvm->mutex); iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX); @@ -2165,7 +2185,7 @@ static void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); /* Protect the session to hear the TDLS setup response on the channel */ - iwl_mvm_protect_session(mvm, vif, duration, duration, 100); + iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); mutex_unlock(&mvm->mutex); iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS); @@ -2703,7 +2723,10 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, ret = 0; goto out; case NL80211_IFTYPE_STATION: + break; case NL80211_IFTYPE_MONITOR: + /* always disable PS when a monitor interface is active */ + mvmvif->ps_disabled = true; break; default: ret = -EINVAL; @@ -2735,7 +2758,20 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, if ((vif->type == NL80211_IFTYPE_AP) || (switching_chanctx && (vif->type == NL80211_IFTYPE_STATION))) { iwl_mvm_update_quotas(mvm, NULL); - iwl_mvm_mac_ctxt_changed(mvm, vif, false); + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + } + + if (vif->csa_active && vif->type == NL80211_IFTYPE_STATION) { + struct iwl_mvm_sta *mvmsta; + + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, + mvmvif->ap_sta_id); + + if (WARN_ON(!mvmsta)) + goto out; + + /* TODO: only re-enable after the first beacon */ + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); } goto out; @@ -2769,6 +2805,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_vif *disabled_vif = NULL; + struct iwl_mvm_sta *mvmsta; lockdep_assert_held(&mvm->mutex); @@ -2779,6 +2816,7 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, goto out; case NL80211_IFTYPE_MONITOR: mvmvif->monitor_active = false; + mvmvif->ps_disabled = false; break; case NL80211_IFTYPE_AP: /* This part is triggered only during CSA */ @@ -2799,7 +2837,13 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, disabled_vif = vif; - iwl_mvm_mac_ctxt_changed(mvm, vif, true); + mvmsta = iwl_mvm_sta_from_staid_protected(mvm, + mvmvif->ap_sta_id); + + if (!WARN_ON(!mvmsta)) + iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); + + iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); break; default: break; diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index 2e73d3bd7757..e292de96e09a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -103,14 +105,6 @@ */ #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 -enum iwl_mvm_tx_fifo { - IWL_MVM_TX_FIFO_BK = 0, - IWL_MVM_TX_FIFO_BE, - IWL_MVM_TX_FIFO_VI, - IWL_MVM_TX_FIFO_VO, - IWL_MVM_TX_FIFO_MCAST = 5, -}; - extern const struct ieee80211_ops iwl_mvm_hw_ops; /** @@ -203,6 +197,7 @@ enum iwl_dbgfs_pm_mask { MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9), + MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10), }; struct iwl_dbgfs_pm { @@ -215,6 +210,7 @@ struct iwl_dbgfs_pm { u32 lprx_rssi_threshold; bool snooze_ena; bool uapsd_misbehaving; + bool use_ps_poll; int mask; }; @@ -253,6 +249,7 @@ struct iwl_dbgfs_bf { enum iwl_mvm_smps_type_request { IWL_MVM_SMPS_REQ_BT_COEX, IWL_MVM_SMPS_REQ_TT, + IWL_MVM_SMPS_REQ_PROT, NUM_IWL_MVM_SMPS_REQ, }; @@ -315,6 +312,9 @@ struct iwl_mvm_vif_bf_data { * @id: between 0 and 3 * @color: to solve races upon MAC addition and removal * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA + * @bssid: BSSID for this (client) interface + * @associated: indicates that we're currently associated, used only for + * managing the firmware state in iwl_mvm_bss_info_changed_station() * @uploaded: indicates the MAC context has been added to the device * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface * should get quota etc. @@ -323,6 +323,7 @@ struct iwl_mvm_vif_bf_data { * interface should get quota etc. * @low_latency: indicates that this interface is in low-latency mode * (VMACLowLatencyMode) + * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following * vifs: P2P_DEVICE, GO and AP. @@ -335,11 +336,15 @@ struct iwl_mvm_vif { u16 color; u8 ap_sta_id; + u8 bssid[ETH_ALEN]; + bool associated; + bool uploaded; bool ap_ibss_active; bool pm_enabled; bool monitor_active; bool low_latency; + bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; u32 ap_beacon_time; @@ -512,6 +517,10 @@ enum { D0I3_PENDING_WAKEUP, }; +#define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff +#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100 +#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200 + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -553,9 +562,8 @@ struct iwl_mvm { struct mvm_statistics_rx rx_stats; - unsigned long transport_queue_stop; u8 queue_to_mac80211[IWL_MAX_HW_QUEUES]; - atomic_t queue_stop_count[IWL_MAX_HW_QUEUES]; + atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; const char *nvm_file_name; struct iwl_nvm_data *nvm_data; @@ -694,6 +702,12 @@ struct iwl_mvm { /* Thermal Throttling and CTkill */ struct iwl_mvm_tt_mgmt thermal_throttle; s32 temperature; /* Celsius */ + /* + * Debug option to set the NIC temperature. 
This option makes the + * driver think this is the actual NIC temperature, and ignore the + * real temperature that is received from the fw + */ + bool temperature_test; /* Debug test temperature is enabled */ #ifdef CONFIG_NL80211_TESTMODE u32 noa_duration; @@ -706,7 +720,7 @@ struct iwl_mvm { u8 last_agg_queue; /* Indicate if device power save is allowed */ - bool ps_disabled; + u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; @@ -714,6 +728,8 @@ struct iwl_mvm { /* system time of last beacon (for AP/GO interface) */ u32 ap_last_beacon_gp2; + + u8 low_latency_agg_frame_limit; }; /* Extract MVM priv from op_mode and _hw */ @@ -878,7 +894,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool force_assoc_off); + bool force_assoc_off, const u8 *bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u32 iwl_mvm_mac_get_queues_mask(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -968,6 +984,7 @@ int rs_pretty_print_rate(char *buf, const u32 rate); /* power management */ int iwl_mvm_power_update_device(struct iwl_mvm *mvm); int iwl_mvm_power_update_mac(struct iwl_mvm *mvm); +int iwl_mvm_power_update_ps(struct iwl_mvm *mvm); int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, char *buf, int bufsz); diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c index cfdd314fdd5d..4fafd4bd89f4 100644 --- a/drivers/net/wireless/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/offloading.c b/drivers/net/wireless/iwlwifi/mvm/offloading.c index 9bfb95e89cfb..adcbf4c8edd8 100644 --- a/drivers/net/wireless/iwlwifi/mvm/offloading.c +++ b/drivers/net/wireless/iwlwifi/mvm/offloading.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index 610dbcb0dc27..87f278cc9b2c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -415,6 +417,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->first_agg_queue = 12; } mvm->sf_state = SF_UNINIT; + mvm->low_latency_agg_frame_limit = 1; mutex_init(&mvm->mutex); mutex_init(&mvm->d0i3_suspend_mutex); @@ -456,7 +459,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.command_names = iwl_mvm_cmd_strings; trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; - trans_cfg.cmd_fifo = IWL_MVM_CMD_FIFO; + trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; snprintf(mvm->hw->wiphy->fw_version, sizeof(mvm->hw->wiphy->fw_version), @@ -494,7 +497,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, goto out_free; /* - * Even if nvm exists in the nvm_file driver should read agin the nvm + * Even if nvm exists in the nvm_file driver should read again the nvm * from the nic because there might be entries that exist in the OTP * and not in the file. * for nics with no_power_up_nic_in_init: rely completely on nvm_file @@ -700,14 +703,13 @@ static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) return; - if (atomic_inc_return(&mvm->queue_stop_count[mq]) > 1) { + if (atomic_inc_return(&mvm->mac80211_queue_stop_count[mq]) > 1) { IWL_DEBUG_TX_QUEUES(mvm, "queue %d (mac80211 %d) already stopped\n", queue, mq); return; } - set_bit(mq, &mvm->transport_queue_stop); ieee80211_stop_queue(mvm->hw, mq); } @@ -719,15 +721,13 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE)) return; - if (atomic_dec_return(&mvm->queue_stop_count[mq]) > 0) { + if (atomic_dec_return(&mvm->mac80211_queue_stop_count[mq]) > 0) { IWL_DEBUG_TX_QUEUES(mvm, - "queue %d (mac80211 %d) already awake\n", + "queue %d (mac80211 %d) still stopped\n", queue, mq); return; } - clear_bit(mq, &mvm->transport_queue_stop); - ieee80211_wake_queue(mvm->hw, mq); } diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c index 6cc243f7cf60..12283b55ee84 100644 --- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c index d9769a23c68b..5a29c193b72a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/iwlwifi/mvm/power.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -198,8 +200,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, } } - if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) + if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) { +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* set advanced pm flag with no uapsd ACs to enable ps-poll */ + if (mvmvif->dbgfs_pm.use_ps_poll) + cmd->flags |= + cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); +#endif return; + } cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK); @@ -496,13 +505,31 @@ struct iwl_power_vifs { bool p2p_tdls; }; -static void iwl_mvm_power_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +static void iwl_mvm_power_disable_pm_iterator(void *_data, u8* mac, + struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_power_vifs *power_iterator = _data; mvmvif->pm_enabled = false; +} + +static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool *disable_ps = _data; + + if (mvmvif->phy_ctxt) + if (mvmvif->phy_ctxt->id < MAX_PHYS) + *disable_ps |= mvmvif->ps_disabled; +} + +static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_power_vifs *power_iterator = _data; + switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_DEVICE: break; @@ -558,9 +585,8 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac, } } -static void -iwl_mvm_power_set_pm(struct iwl_mvm *mvm, - struct iwl_power_vifs *vifs) +static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm, + struct iwl_power_vifs *vifs) { struct iwl_mvm_vif *bss_mvmvif = NULL; struct iwl_mvm_vif *p2p_mvmvif = NULL; @@ -570,10 +596,11 @@ iwl_mvm_power_set_pm(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - /* get vifs info + set pm_enable to false */ + /* set pm_enable to false */ ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_power_iterator, vifs); + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_disable_pm_iterator, + NULL); if (vifs->bss_vif) bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif); @@ -816,32 +843,92 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) +static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) +{ + bool disable_ps; + int ret; + + /* disable PS if CAM */ + disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM); 
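+ /* + * Sketch of the resulting policy (illustrative restatement of the code in + * this hunk, not an additional behavior): the iterator below contributes + * the second term by OR-ing each vif's ps_disabled flag into disable_ps: + * disable_ps = (power_scheme == IWL_POWER_SCHEME_CAM) || + * (ps_disabled set on any vif bound to a PHY context) + */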
+ /* ...or if any of the vifs require PS to be off */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_ps_disabled_iterator, + &disable_ps); + + /* update device power state if it has changed */ + if (mvm->ps_disabled != disable_ps) { + bool old_ps_disabled = mvm->ps_disabled; + + mvm->ps_disabled = disable_ps; + ret = iwl_mvm_power_update_device(mvm); + if (ret) { + mvm->ps_disabled = old_ps_disabled; + return ret; + } + } + + return 0; +} + +static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, + struct iwl_power_vifs *vifs) { struct iwl_mvm_vif *mvmvif; + bool ba_enable; + + if (!vifs->bf_vif) + return 0; + + mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif); + + ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || + !vifs->bf_vif->bss_conf.ps || + iwl_mvm_vif_low_latency(mvmvif)); + + return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable); +} + +int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) +{ + struct iwl_power_vifs vifs = { + .mvm = mvm, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + /* get vifs info */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_get_vifs_iterator, &vifs); + + ret = iwl_mvm_power_set_ps(mvm); + if (ret) + return ret; + + return iwl_mvm_power_set_ba(mvm, &vifs); +} + +int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) +{ struct iwl_power_vifs vifs = { .mvm = mvm, }; - bool ba_enable; int ret; lockdep_assert_held(&mvm->mutex); + /* get vifs info */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_power_get_vifs_iterator, &vifs); + iwl_mvm_power_set_pm(mvm, &vifs); - /* disable PS if CAM */ - if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) { - mvm->ps_disabled = true; - } else { - /* don't update device power state unless we add / remove monitor */ - if (vifs.monitor_vif) { - if (vifs.monitor_active) - mvm->ps_disabled = true; - ret = iwl_mvm_power_update_device(mvm); - if (ret) - return ret; - } - } + ret = iwl_mvm_power_set_ps(mvm); + if (ret) + return ret; if (vifs.bss_vif) { ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif); @@ -855,16 +942,7 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) return ret; } - if (!vifs.bf_vif) - return 0; - - mvmvif = iwl_mvm_vif_from_mac80211(vifs.bf_vif); - - ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || - !vifs.bf_vif->bss_conf.ps || - iwl_mvm_vif_low_latency(mvmvif)); - - return iwl_mvm_update_beacon_abort(mvm, vifs.bf_vif, ba_enable); + return iwl_mvm_power_set_ba(mvm, &vifs); } int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c index 4e20b3ce2b6a..5fd502db03d1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/iwlwifi/mvm/quota.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -161,6 +163,9 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm, quota *= (beacon_int - mvm->noa_duration); quota /= beacon_int; + IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n", + le32_to_cpu(cmd->quotas[i].quota), quota); + cmd->quotas[i].quota = cpu_to_le32(quota); } #endif @@ -222,6 +227,9 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat; quota_rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN; + IWL_DEBUG_QUOTA(mvm, + "quota: low-latency binding active, remaining quota per other binding: %d\n", + quota); } else if (num_active_macs) { /* * There are 0 or more than 1 low latency bindings, or all the @@ -230,6 +238,9 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, */ quota = QUOTA_100 / num_active_macs; quota_rem = QUOTA_100 % num_active_macs; + IWL_DEBUG_QUOTA(mvm, + "quota: splitting evenly per binding: %d\n", + quota); } else { /* values don't really matter - won't be used */ quota = 0; @@ -271,6 +282,9 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, for (i = 0; i < MAX_BINDINGS; i++) { if (le32_to_cpu(cmd.quotas[i].quota) != 0) { le32_add_cpu(&cmd.quotas[i].quota, quota_rem); + IWL_DEBUG_QUOTA(mvm, + "quota: giving remainder of %d to binding %d\n", + quota_rem, i); break; } } diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index c70e959bf0e3..17002cf437db 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -2678,6 +2679,7 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, int i; int num_rates = ARRAY_SIZE(lq_cmd->rs_table); __le32 ucode_rate_le32 = cpu_to_le32(ucode_rate); + u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; for (i = 0; i < num_rates; i++) lq_cmd->rs_table[i] = ucode_rate_le32; @@ -2688,6 +2690,13 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm, lq_cmd->mimo_delim = num_rates - 1; else lq_cmd->mimo_delim = 0; + + lq_cmd->reduced_tpc = 0; + + if (num_of_ant(ant) == 1) + lq_cmd->single_stream_ant_msk = ant; + + lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; } #endif /* CONFIG_MAC80211_DEBUGFS */ @@ -2811,31 +2820,55 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm, const struct rs_rate *initial_rate) { struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; - u8 ant = initial_rate->ant; + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_vif *mvmvif; + + lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + lq_cmd->agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); #ifdef CONFIG_MAC80211_DEBUGFS if (lq_sta->pers.dbg_fixed_rate) { rs_build_rates_table_from_fixed(mvm, lq_cmd, lq_sta->band, lq_sta->pers.dbg_fixed_rate); - lq_cmd->reduced_tpc = 0; - ant = (lq_sta->pers.dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >> - RATE_MCS_ANT_POS; - } else + return; + } #endif - rs_build_rates_table(mvm, lq_sta, initial_rate); + if (WARN_ON_ONCE(!sta || !initial_rate)) + return; - if (num_of_ant(ant) == 1) - lq_cmd->single_stream_ant_msk = ant; + rs_build_rates_table(mvm, lq_sta, initial_rate); - 
lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; - lq_cmd->agg_disable_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + if (num_of_ant(initial_rate->ant) == 1) + lq_cmd->single_stream_ant_msk = initial_rate->ant; - lq_cmd->agg_time_limit = - cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (num_of_ant(initial_rate->ant) == 1) + lq_cmd->single_stream_ant_msk = initial_rate->ant; + + lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize; - if (sta) - lq_cmd->agg_time_limit = + /* + * In case of low latency, tell the firmware to leave a frame in the + * Tx Fifo so that it can start a transaction in the same TxOP. This + * basically allows the firmware to send bursts. + */ + if (iwl_mvm_vif_low_latency(mvmvif)) { + lq_cmd->agg_frame_cnt_limit--; + + if (mvm->low_latency_agg_frame_limit) + lq_cmd->agg_frame_cnt_limit = + min(lq_cmd->agg_frame_cnt_limit, + mvm->low_latency_agg_frame_limit); + } + + if (mvmsta->vif->p2p) + lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK; + + lq_cmd->agg_time_limit = cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta)); } @@ -2932,10 +2965,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm, lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate); if (lq_sta->pers.dbg_fixed_rate) { - struct rs_rate rate; - rs_rate_from_ucode_rate(lq_sta->pers.dbg_fixed_rate, - lq_sta->band, &rate); - rs_fill_lq_cmd(mvm, NULL, lq_sta, &rate); + rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL); iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false); } } diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c index bf5cd8c8b0f7..a6cb84ed653f 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/iwlwifi/mvm/rx.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -491,10 +493,29 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, .mvm = mvm, }; + /* + * set temperature debug enabled - ignore FW temperature updates + * and use the user-set temperature. + */ + if (mvm->temperature_test) { + if (mvm->temperature < le32_to_cpu(common->temperature)) + IWL_DEBUG_TEMP(mvm, + "Ignoring FW temperature update that is greater than the debug set temperature (debug temp = %d, fw temp = %d)\n", + mvm->temperature, + le32_to_cpu(common->temperature)); + /* + * skip iwl_mvm_tt_handler since we are in + * temperature debug mode and we are ignoring + * the new temperature value + */ + goto update; + } + if (mvm->temperature != le32_to_cpu(common->temperature)) { mvm->temperature = le32_to_cpu(common->temperature); iwl_mvm_tt_handler(mvm); } +update: iwl_mvm_update_rx_statistics(mvm, stats); ieee80211_iterate_active_interfaces(mvm->hw, diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 004b1f5d0314..bf9c63dc4a7d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. 
All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -279,6 +281,7 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, { bool global_bound = false; enum ieee80211_band band; + u8 frag_passive_dwell = 0; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, @@ -288,12 +291,36 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, if (!global_bound) goto not_bound; - params->suspend_time = 100; - params->max_out_time = 600; + params->suspend_time = 30; + params->max_out_time = 170; if (iwl_mvm_low_latency(mvm)) { - params->suspend_time = 250; - params->max_out_time = 250; + if (mvm->fw->ucode_capa.api[0] & + IWL_UCODE_TLV_API_FRAGMENTED_SCAN) { + params->suspend_time = 105; + params->max_out_time = 70; + frag_passive_dwell = 20; + } else { + params->suspend_time = 120; + params->max_out_time = 120; + } + } + + if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] & + IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) { + /* + * P2P device scan should not be fragmented to avoid negative + * impact on P2P device discovery. Configure max_out_time to be + * equal to dwell time on passive channel. Take the longest + * possible value, the one that corresponds to the 2GHz band + */ + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + u32 passive_dwell = + iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ); + params->max_out_time = passive_dwell; + } else { + params->passive_fragmented = true; + } } if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY) @@ -302,7 +329,11 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, not_bound: for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { - params->dwell[band].passive = iwl_mvm_get_passive_dwell(band); + if (params->passive_fragmented) + params->dwell[band].passive = frag_passive_dwell; + else + params->dwell[band].passive = + iwl_mvm_get_passive_dwell(band); params->dwell[band].active = iwl_mvm_get_active_dwell(band, n_ssids); } @@ -1100,10 +1131,11 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm, struct iwl_mvm_scan_params *params) { memset(cmd, 0, ksize(cmd)); - cmd->active_dwell = (u8)params->dwell[IEEE80211_BAND_2GHZ].active; - cmd->passive_dwell = (u8)params->dwell[IEEE80211_BAND_2GHZ].passive; - /* TODO: Use params; now fragmented isn't used. */ - cmd->fragmented_dwell = 0; + cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active; + cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive; + if (params->passive_fragmented) + cmd->fragmented_dwell = + params->dwell[IEEE80211_BAND_2GHZ].passive; cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm); cmd->max_out_time = cpu_to_le32(params->max_out_time); cmd->suspend_time = cpu_to_le32(params->suspend_time); diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c index e843b67f2201..f88410c7cbfb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/iwlwifi/mvm/sf.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 763548880399..dd9f3a4347f6 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -250,10 +252,14 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, if (ret) return ret; - /* The first station added is the AP, the others are TDLS STAs */ - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) - mvmvif->ap_sta_id = sta_id; + if (vif->type == NL80211_IFTYPE_STATION) { + if (!sta->tdls) { + WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT); + mvmvif->ap_sta_id = sta_id; + } else { + WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT); + } + } rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); @@ -458,8 +464,9 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, return ret; } -int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype) +static int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + u32 qmask, enum nl80211_iftype iftype) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); @@ -474,7 +481,8 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, return 0; } -void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) +static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); @@ -544,6 +552,13 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) return ret; } +void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) +{ + lockdep_assert_held(&mvm->mutex); + + iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); +} + /* * Send the add station command for the vif's broadcast station. * Assumes that the station was already allocated. @@ -552,10 +567,10 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. 
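 * Note: with this change the broadcast station is no longer passed in by the * caller; it is looked up from the vif itself as &mvmvif->bcast_sta (see the * first added line of the body below), so iwl_mvm_send_add_bcast_sta() takes * no @bsta parameter.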
*/ -int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *bsta) +int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; const u8 *baddr = _baddr; @@ -573,19 +588,40 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* Send the FW a request to remove the station from its internal data * structures, but DO NOT remove the entry from the local data structures. */ -int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *bsta) +int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; lockdep_assert_held(&mvm->mutex); - ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id); + ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); return ret; } +int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 qmask; + + lockdep_assert_held(&mvm->mutex); + + qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); + + /* + * The firmware defines the TFD queue mask to only be relevant + * for *unicast* queues, so the multicast (CAB) queue shouldn't + * be included. + */ + if (vif->type == NL80211_IFTYPE_AP) + qmask &= ~BIT(vif->cab_queue); + + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, + ieee80211_vif_type_p2p(vif)); +} + /* Allocate a new station entry for the broadcast station to the given vif, * and send it to the FW. * Note that each P2P mac should have its own broadcast station. * * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. */ -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *bsta) +int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; - u32 qmask; + struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; int ret; lockdep_assert_held(&mvm->mutex); - qmask = iwl_mvm_mac_get_queues_mask(mvm, vif); - ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask, - ieee80211_vif_type_p2p(vif)); + ret = iwl_mvm_alloc_bcast_sta(mvm, vif); if (ret) return ret; - ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, - mvmvif->id, mvmvif->color); + ret = iwl_mvm_send_add_bcast_sta(mvm, vif); if (ret) iwl_mvm_dealloc_int_sta(mvm, bsta); + return ret; } +void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); +} + /* * Send the FW a request to remove the station from its internal data * structures, and in addition remove it from the local data structure. 
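 * With this change the removal is a thin composition of the two helpers * above: iwl_mvm_send_rm_bcast_sta() removes the station from the firmware, * then iwl_mvm_dealloc_bcast_sta() drops the driver-side entry.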
*/ -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta) +int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; lockdep_assert_held(&mvm->mutex); - ret = iwl_mvm_rm_sta_common(mvm, bsta->sta_id); - if (ret) - return ret; + ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); + + iwl_mvm_dealloc_bcast_sta(mvm, vif); - iwl_mvm_dealloc_int_sta(mvm, bsta); return ret; } @@ -910,7 +948,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } tid_data->ssn = 0xffff; - iwl_trans_txq_disable(mvm->trans, txq_id); + iwl_trans_txq_disable(mvm->trans, txq_id, true); /* fall through */ case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: @@ -965,7 +1003,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true)) IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); - iwl_trans_txq_disable(mvm->trans, tid_data->txq_id); + iwl_trans_txq_disable(mvm->trans, tid_data->txq_id, true); } mvm->queue_to_mac80211[tid_data->txq_id] = diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index 3b1c8bd6cb54..aeb3a7f80ceb 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -387,17 +389,15 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); -int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta, - u32 qmask, enum nl80211_iftype iftype); -void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta); -int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *bsta); -int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *bsta); -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *bsta); -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta); +void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); + +int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); + void iwl_mvm_sta_drained_wk(struct work_struct *wk); void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta); diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h index 0241665925f7..79ab6beb6b26 100644 --- a/drivers/net/wireless/iwlwifi/mvm/testmode.h +++ b/drivers/net/wireless/iwlwifi/mvm/testmode.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index 33e5041f1efc..447d3b1003df 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -348,6 +350,38 @@ unlock: return 0; } +static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm *mvm = + container_of(notif_wait, struct iwl_mvm, notif_wait); + struct iwl_mvm_time_event_data *te_data = data; + struct iwl_time_event_notif *resp; + int resp_len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION)) + return true; + + if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { + IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n"); + return true; + } + + resp = (void *)pkt->data; + + /* te_data->uid is already set in the TIME_EVENT_CMD response */ + if (le32_to_cpu(resp->unique_id) != te_data->uid) + return false; + + IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n", + te_data->uid); + if (!resp->status) + IWL_ERR(mvm, + "TIME_EVENT_NOTIFICATION received but not executed\n"); + + return true; +} + static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { @@ -441,10 +475,12 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, void iwl_mvm_protect_session(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, - u32 max_delay) + u32 max_delay, bool wait_for_notif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION }; + struct iwl_notification_wait wait_te_notif; struct iwl_time_event_cmd time_cmd = {}; lockdep_assert_held(&mvm->mutex); @@ -489,7 +525,28 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, TE_V2_NOTIF_HOST_EVENT_END | T2_V2_START_IMMEDIATELY); - iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); + if (!wait_for_notif) { + iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); + return; + } + + /* + * Create notification_wait for the TIME_EVENT_NOTIFICATION to use + * right after we send the time event + */ + iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif, + te_notif_response, + ARRAY_SIZE(te_notif_response), + iwl_mvm_te_notif, te_data); + + /* If TE was sent OK - wait for the notification that started */ + if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) { + IWL_ERR(mvm, "Failed to add TE to protect session\n"); + iwl_remove_notification(&mvm->notif_wait, &wait_te_notif); + } else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif, + TU_TO_JIFFIES(max_delay))) { + IWL_ERR(mvm, "Failed to protect session until TE\n"); + } } /* diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h index 2f48a90d4ad3..bee3b2446b35 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -124,10 +126,12 @@ * @min_duration: will start a new session if the current session will end * in less than min_duration. * @max_delay: maximum delay before starting the time event (in TU) + * @wait_for_notif: true if it is required that a time event notification be + * waited for (that the time event has been scheduled before returning) * * This function can be used to start a session protection which means that the * fw will stay on the channel for %duration_ms milliseconds. This function - * will block (sleep) until the session starts. This function can also be used + * can block (sleep) until the session starts. This function can also be used * to extend a currently running session. * This function is meant to be used for BSS association for example, where we * want to make sure that the fw stays on the channel during the association. @@ -135,7 +139,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 duration, u32 min_duration, - u32 max_delay); + u32 max_delay, bool wait_for_notif); /** * iwl_mvm_stop_session_protection - cancel the session protection. diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c index 0464599c111e..c3e1fe4282f1 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/iwlwifi/mvm/tt.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -314,14 +316,26 @@ static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) { u32 duration = mvm->thermal_throttle.params->ct_kill_duration; + if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) + return; + IWL_ERR(mvm, "Enter CT Kill\n"); iwl_mvm_set_hw_ctkill_state(mvm, true); - schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, - round_jiffies_relative(duration * HZ)); + + /* Don't schedule an exit work if we're in test mode, since + * the temperature will not change unless we manually set it + * again (or disable testing). + */ + if (!mvm->temperature_test) + schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit, + round_jiffies_relative(duration * HZ)); } static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm) { + if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) + return; + IWL_ERR(mvm, "Exit CT Kill\n"); iwl_mvm_set_hw_ctkill_state(mvm, false); } @@ -444,6 +458,12 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm) return; } + if (params->support_ct_kill && + temperature <= tt->params->ct_kill_exit) { + iwl_mvm_exit_ctkill(mvm); + return; + } + if (params->support_dynamic_smps) { if (!tt->dynamic_smps && temperature >= params->dynamic_smps_entry) { diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 9ee410bf6da2..ed0919465e0e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -486,7 +488,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); - iwl_trans_txq_disable(mvm->trans, tid_data->txq_id); + iwl_trans_txq_disable(mvm->trans, tid_data->txq_id, true); tid_data->state = IWL_AGG_OFF; /* * we can't hold the mutex - but since we are after a sequence diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index ac249da8a22b..1958f298ac8b 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -387,15 +389,19 @@ struct iwl_error_event_table { struct iwl_umac_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ - u32 pc; /* program counter */ u32 blink1; /* branch link */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ u32 data1; /* error-specific data */ u32 data2; /* error-specific data */ - u32 line; /* source code line of error */ - u32 umac_ver; /* umac version */ + u32 data3; /* error-specific data */ + u32 umac_fw_ver; /* UMAC version */ + u32 umac_fw_api_ver; /* UMAC FW API ver */ + u32 frame_pointer; /* core register 27*/ + u32 stack_pointer; /* core register 28 */ + u32 cmd_header; /* latest host cmd sent to UMAC */ + u32 nic_isr_pref; /* ISR status register */ } __packed; #define ERROR_START_OFFSET (1 * sizeof(u32)) @@ -409,7 +415,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) base = mvm->umac_error_event_table; - if (base < 0x800000 || base >= 0x80C000) { + if (base < 0x800000) { IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, @@ -428,14 +434,19 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); - IWL_ERR(mvm, "0x%08X | umac uPc\n", table.pc); IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1); IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2); IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1); IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2); IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1); IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2); - IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_ver); + IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3); + IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_fw_ver); + IWL_ERR(mvm, "0x%08X | umac api version\n", 
table.umac_fw_api_ver); + IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer); + IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer); + IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header); + IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 073a68b97a72..b9d5049e52da 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 78f72c34438a..a4fedc4a7448 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -364,9 +365,10 @@ int iwl_pcie_tx_init(struct iwl_trans *trans); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); void iwl_pcie_tx_free(struct iwl_trans *trans); -void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, - int sta_id, int tid, int frame_limit, u16 ssn); -void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); +void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg); +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index a2698e5e062c..702f47fb16fe 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 06e04aaf61ee..3076e0e9a490 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 6acccb19c4f3..a6336b4aa3a4 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -34,6 +35,7 @@ #include "iwl-csr.h" #include "iwl-prph.h" #include "iwl-io.h" +#include "iwl-scd.h" #include "iwl-op-mode.h" #include "internal.h" /* FIXME: need to abstract out TX command (once we know what it looks like) */ @@ -644,17 +646,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) memset(txq, 0, sizeof(*txq)); } -/* - * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask - */ -static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask) -{ - struct iwl_trans_pcie __maybe_unused *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - - iwl_write_prph(trans, SCD_TXFACT, mask); -} - void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -692,7 +683,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) trans_pcie->cmd_fifo); /* Activate all Tx DMA/FIFO channels */ - iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7)); + iwl_scd_activate_fifos(trans); /* Enable DMA channel */ for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++) @@ -745,7 +736,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans) /* Turn off all Tx DMA fifos */ spin_lock(&trans_pcie->irq_lock); - iwl_pcie_txq_set_sched(trans, 0); + iwl_scd_deactivate_fifos(trans); /* Stop each Tx DMA channel, and wait for it to be idle */ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { @@ -886,7 +877,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) spin_lock(&trans_pcie->irq_lock); /* Turn off all Tx DMA fifos */ - iwl_write_prph(trans, SCD_TXFACT, 0); + iwl_scd_deactivate_fifos(trans); /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, @@ -1072,55 +1063,52 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, return 0; } -static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans, - u16 txq_id) -{ - /* Simply stop the queue, but don't change any configuration; - * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ - iwl_write_prph(trans, - SCD_QUEUE_STATUS_BITS(txq_id), - (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| - (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); -} - /* Receiver address (actually, Rx station's index into station table), * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) -void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, - int sta_id, int tid, int frame_limit, u16 ssn) +void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int fifo = -1; if (test_and_set_bit(txq_id, trans_pcie->queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); - /* Stop this Tx queue before configuring it */ - iwl_pcie_txq_set_inactive(trans, txq_id); + if (cfg) { + fifo = cfg->fifo; - /* Set this queue as a chain-building queue unless it is CMD queue */ - if (txq_id != trans_pcie->cmd_queue) - iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id)); + /* Disable the scheduler prior configuring the cmd queue */ + if (txq_id == trans_pcie->cmd_queue) + iwl_scd_enable_set_active(trans, 0); - /* If this queue is mapped to a certain station: it is an AGG queue */ - if (sta_id >= 0) { - u16 ra_tid = BUILD_RAxTID(sta_id, tid); + /* Stop this Tx queue before configuring it */ + iwl_scd_txq_set_inactive(trans, txq_id); - /* Map receiver-address / traffic-ID to this queue */ - iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); + /* Set this queue as a chain-building queue unless it is CMD */ + if (txq_id != trans_pcie->cmd_queue) + iwl_scd_txq_set_chain(trans, txq_id); - /* enable aggregations for the queue */ - iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); - trans_pcie->txq[txq_id].ampdu = true; - } else { - /* - * disable aggregations for the queue, this will also make the - * ra_tid mapping configuration irrelevant since it is now a - * non-AGG queue. - */ - iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); + if (cfg->aggregate) { + u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid); + + /* Map receiver-address / traffic-ID to this queue */ + iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); + + /* enable aggregations for the queue */ + iwl_scd_txq_enable_agg(trans, txq_id); + trans_pcie->txq[txq_id].ampdu = true; + } else { + /* + * disable aggregations for the queue, this will also + * make the ra_tid mapping configuration irrelevant + * since it is now a non-AGG queue. + */ + iwl_scd_txq_disable_agg(trans, txq_id); - ssn = trans_pcie->txq[txq_id].q.read_ptr; + ssn = trans_pcie->txq[txq_id].q.read_ptr; + } } /* Place first TFD at index corresponding to start sequence number. 
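With this rework the scheduler parameters travel in an optional struct iwl_trans_txq_scd_cfg instead of a flat argument list, and a NULL cfg skips SCD programming entirely. A rough sketch of the resulting call shapes, assuming the struct carries exactly the fields this hunk dereferences (fifo, sta_id, tid, frame_limit, aggregate); the numeric values are placeholders, not real FIFO assignments:

	/* Illustrative only: layout inferred from the cfg-> uses above. */
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = 1,		/* placeholder FIFO index */
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = 64,	/* placeholder TX window size */
		.aggregate = true,	/* map RA/TID and enable aggregation */
	};

	iwl_trans_pcie_txq_enable(trans, txq_id, ssn, &cfg);	/* full SCD setup */
	iwl_trans_pcie_txq_enable(trans, txq_id, ssn, NULL);	/* leave SCD alone */
	iwl_trans_pcie_txq_disable(trans, txq_id, false);	/* unmap, keep SCD state */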
@@ -1128,32 +1116,43 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); - iwl_write_direct32(trans, HBUS_TARG_WRPTR, - (ssn & 0xff) | (txq_id << 8)); - iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); + if (cfg) { + u8 frame_limit = cfg->frame_limit; + + iwl_write_direct32(trans, HBUS_TARG_WRPTR, + (ssn & 0xff) | (txq_id << 8)); + iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); - /* Set up Tx window size and frame limit for this queue */ - iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + - SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); - iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + + /* Set up Tx window size and frame limit for this queue */ + iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); + iwl_trans_write_mem32(trans, + trans_pcie->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & - SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | + SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); - - /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ - iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), - (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | - (1 << SCD_QUEUE_STTS_REG_POS_WSL) | - SCD_QUEUE_STTS_REG_MSK); + SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); + + /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | + (1 << SCD_QUEUE_STTS_REG_POS_WSL) | + SCD_QUEUE_STTS_REG_MSK); + + /* enable the scheduler for this queue (only) */ + if (txq_id == trans_pcie->cmd_queue) + iwl_scd_enable_set_active(trans, BIT(txq_id)); + } + trans_pcie->txq[txq_id].active = true; IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", txq_id, fifo, ssn & 0xff); } -void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, + bool configure_scd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 stts_addr = trans_pcie->scd_base_addr + @@ -1172,10 +1171,12 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) return; } - iwl_pcie_txq_set_inactive(trans, txq_id); + if (configure_scd) { + iwl_scd_txq_set_inactive(trans, txq_id); - iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, - ARRAY_SIZE(zero_val)); + iwl_trans_write_mem(trans, stts_addr, (void *)zero_val, + ARRAY_SIZE(zero_val)); + } iwl_pcie_txq_unmap(trans, txq_id); trans_pcie->txq[txq_id].ampdu = false; diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 47a998d8f99e..22884ba7d6cc 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -653,6 +653,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, if (channel && !(channel->flags & IEEE80211_CHAN_DISABLED)) { bss = cfg80211_inform_bss(wiphy, channel, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, get_unaligned_le64(tsfdesc), capa, intvl, ie, ielen, LBS_SCAN_RSSI_TO_MBM(rssi), @@ -1754,6 +1755,7 @@ static void lbs_join_post(struct lbs_private *priv, bss = cfg80211_inform_bss(priv->wdev->wiphy, params->chandef.chan, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, 0, 
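This driver and the remaining wireless drivers below are converted to the extended cfg80211_inform_bss() signature, which inserts an explicit frame type argument directly after the channel. Drivers that cannot tell whether the data came from a beacon or a probe response pass CFG80211_BSS_FTYPE_UNKNOWN. The converted call shape, with illustrative variable names:

	bss = cfg80211_inform_bss(wiphy, channel,
				  CFG80211_BSS_FTYPE_UNKNOWN, /* beacon vs. probe resp. unknown */
				  bssid, timestamp, capability, beacon_interval,
				  ie, ie_len, signal_mbm, GFP_KERNEL);
	if (bss)
		cfg80211_put_bss(wiphy, bss); /* drop the reference inform_bss returned */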
capability, diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index e2e6bf13c2d8..c4723b0f5757 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c @@ -246,7 +246,7 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy, } if (priv->roc_cfg.cookie) { - wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llu\n", + wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llx\n", priv->roc_cfg.cookie); return -EBUSY; } @@ -1557,6 +1557,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) band)); bss = cfg80211_inform_bss(priv->wdev->wiphy, chan, + CFG80211_BSS_FTYPE_UNKNOWN, bss_info.bssid, 0, WLAN_CAPABILITY_IBSS, 0, ie_buf, ie_len, 0, GFP_KERNEL); cfg80211_put_bss(priv->wdev->wiphy, bss); diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index baf0aab63c04..985f6c2654fb 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -1470,7 +1470,7 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec; struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_ie_types_header *tlv; - struct hw_spec_fw_api_rev *api_rev; + struct hw_spec_api_rev *api_rev; u16 resp_size, api_id; int i, left_len, parsed_len = 0; @@ -1508,7 +1508,6 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, } adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number); - adapter->fw_api_ver = (adapter->fw_release_number >> 16) & 0xff; adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna); if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) { @@ -1538,23 +1537,30 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, while (left_len > sizeof(struct mwifiex_ie_types_header)) { tlv = (void *)&hw_spec->tlvs + parsed_len; switch (le16_to_cpu(tlv->type)) { - case TLV_TYPE_FW_API_REV: - api_rev = (struct hw_spec_fw_api_rev *)tlv; + case TLV_TYPE_API_REV: + api_rev = (struct hw_spec_api_rev *)tlv; api_id = le16_to_cpu(api_rev->api_id); switch (api_id) { case KEY_API_VER_ID: - adapter->fw_key_api_major_ver = + adapter->key_api_major_ver = api_rev->major_ver; - adapter->fw_key_api_minor_ver = + adapter->key_api_minor_ver = api_rev->minor_ver; dev_dbg(adapter->dev, - "fw_key_api v%d.%d\n", - adapter->fw_key_api_major_ver, - adapter->fw_key_api_minor_ver); + "key_api v%d.%d\n", + adapter->key_api_major_ver, + adapter->key_api_minor_ver); + break; + case FW_API_VER_ID: + adapter->fw_api_ver = + api_rev->major_ver; + dev_dbg(adapter->dev, + "Firmware api version %d\n", + adapter->fw_api_ver); break; default: dev_warn(adapter->dev, - "Unknown FW api_id: %d\n", + "Unknown api_id: %d\n", api_id); break; } @@ -1567,7 +1573,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, } parsed_len += le16_to_cpu(tlv->len) + sizeof(struct mwifiex_ie_types_header); - left_len -= parsed_len; + left_len -= le16_to_cpu(tlv->len) + + sizeof(struct mwifiex_ie_types_header); } } diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h index 49da2d53d294..6a703ea18800 100644 --- a/drivers/net/wireless/mwifiex/fw.h +++ b/drivers/net/wireless/mwifiex/fw.h @@ -83,7 +83,7 @@ enum KEY_TYPE_ID { #define WPA_PN_SIZE 8 #define KEY_PARAMS_FIXED_LEN 10 #define KEY_INDEX_MASK 0xf -#define FW_KEY_API_VER_MAJOR_V2 2 +#define KEY_API_VER_MAJOR_V2 2 #define KEY_MCAST BIT(0) #define KEY_UNICAST BIT(1) @@ -170,7 +170,7 @@ enum 
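Besides the TLV rename, the hw_spec parser above fixes a loop bug: the old code subtracted the cumulative parsed_len from left_len on every pass, so left_len shrank far too quickly and TLVs past the first few were never examined. Reduced to its essentials, the corrected walk looks like:

	u16 tlv_sz;

	while (left_len > sizeof(struct mwifiex_ie_types_header)) {
		tlv = (void *)&hw_spec->tlvs + parsed_len;
		tlv_sz = le16_to_cpu(tlv->len) +
			 sizeof(struct mwifiex_ie_types_header);
		/* ... dispatch on le16_to_cpu(tlv->type) ... */
		parsed_len += tlv_sz;
		left_len -= tlv_sz;	/* this TLV's size, not the running total */
	}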
MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) #define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) #define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194) -#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199) +#define TLV_TYPE_API_REV (PROPRIETARY_TLV_BASE_ID + 199) #define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048 @@ -844,11 +844,12 @@ struct host_cmd_ds_802_11_ps_mode_enh { } params; } __packed; -enum FW_API_VER_ID { +enum API_VER_ID { KEY_API_VER_ID = 1, + FW_API_VER_ID = 2, }; -struct hw_spec_fw_api_rev { +struct hw_spec_api_rev { struct mwifiex_ie_types_header header; __le16 api_id; u8 major_ver; diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 269a277d0a2e..80bda8087508 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -282,8 +282,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter) adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX; adapter->empty_tx_q_cnt = 0; adapter->ext_scan = true; - adapter->fw_key_api_major_ver = 0; - adapter->fw_key_api_minor_ver = 0; + adapter->key_api_major_ver = 0; + adapter->key_api_minor_ver = 0; } /* diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index a2733b1e63f9..54399631599a 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -833,7 +833,7 @@ struct mwifiex_adapter { struct semaphore *card_sem; bool ext_scan; u8 fw_api_ver; - u8 fw_key_api_major_ver, fw_key_api_minor_ver; + u8 key_api_major_ver, key_api_minor_ver; struct work_struct iface_work; unsigned long iface_work_flags; struct memory_type_mapping *mem_type_mapping_tbl; diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index dee717a19ddb..195ef0ca343f 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -1719,7 +1719,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { bss = cfg80211_inform_bss(priv->wdev->wiphy, - chan, bssid, timestamp, + chan, CFG80211_BSS_FTYPE_UNKNOWN, + bssid, timestamp, cap_info_bitmap, beacon_period, ie_buf, ie_len, rssi, GFP_KERNEL); bss_priv = (struct mwifiex_bss_priv *)bss->priv; diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c index 733de92a4c61..225f7498048b 100644 --- a/drivers/net/wireless/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/mwifiex/sta_cmd.c @@ -965,7 +965,7 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv, u16 cmd_action, u32 cmd_oid, struct mwifiex_ds_encrypt_key *enc_key) { - if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) + if (priv->adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) return mwifiex_cmd_802_11_key_material_v2(priv, cmd, cmd_action, cmd_oid, enc_key); diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c index 08b78baeb846..62866b0f0830 100644 --- a/drivers/net/wireless/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c @@ -637,7 +637,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { - if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) + if (priv->adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) return 
mwifiex_ret_802_11_key_material_v2(priv, resp); else return mwifiex_ret_802_11_key_material_v1(priv, resp); diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index caae9738100a..b95a29b868d1 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -877,7 +877,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv, return -1; } - if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) { + if (adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) { memcpy(encrypt_key->key_material, wep_key->key_material, wep_key->key_length); encrypt_key->key_len = wep_key->key_length; @@ -903,7 +903,7 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv, memset(&priv->wep_key[index], 0, sizeof(struct mwifiex_wep_key)); - if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) + if (adapter->key_api_major_ver == KEY_API_VER_MAJOR_V2) enc_key = encrypt_key; else enc_key = NULL; diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index 7118a18b91ba..4371e12b36f3 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -357,7 +357,7 @@ static int mwifiex_usb_probe(struct usb_interface *intf, card->usb_boot_state = USB8XXX_FW_READY; break; default: - pr_warning("unknown id_product %#x\n", id_product); + pr_warn("unknown id_product %#x\n", id_product); card->usb_boot_state = USB8XXX_FW_DNLD; break; } diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c index cee028321a9a..ec79c49de097 100644 --- a/drivers/net/wireless/mwifiex/util.c +++ b/drivers/net/wireless/mwifiex/util.c @@ -172,7 +172,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq, CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len, - 0, GFP_ATOMIC); + 0); return 0; } diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index d3cf7c3ebfd6..995846422dc0 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -534,7 +534,7 @@ static void ezusb_request_out_callback(struct urb *urb) if (ctx->killed) { spin_unlock_irqrestore(&upriv->req_lock, flags); - pr_warning("interrupt called with dead ctx"); + pr_warn("interrupt called with dead ctx\n"); goto out; } @@ -671,8 +671,8 @@ static void ezusb_request_in_callback(struct ezusb_priv *upriv, default: spin_unlock_irqrestore(&upriv->req_lock, flags); - pr_warning("Matched IN URB, unexpected context state(0x%x)", - state); + pr_warn("Matched IN URB, unexpected context state(0x%x)\n", + state); /* Throw this CTX away and try submitting another */ del_timer(&ctx->timer); ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK; @@ -1394,12 +1394,12 @@ static void ezusb_bulk_in_callback(struct urb *urb) /* When a device gets unplugged we get this every time * we resubmit, flooding the logs. 
Since we don't use * USB timeouts, it shouldn't happen any other time*/ - pr_warning("%s: urb timed out, not resubmiting", __func__); + pr_warn("%s: urb timed out, not resubmitting\n", __func__); return; } if (urb->status == -ECONNABORTED) { - pr_warning("%s: connection abort, resubmiting urb", - __func__); + pr_warn("%s: connection abort, resubmitting urb\n", + __func__); goto resubmit; } if ((urb->status == -EILSEQ) @@ -1605,13 +1605,10 @@ static int ezusb_probe(struct usb_interface *interface, for (i = 0; i < iface_desc->bNumEndpoints; ++i) { ep = &interface->altsetting[0].endpoint[i].desc; - if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) - == USB_DIR_IN) && - ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) - == USB_ENDPOINT_XFER_BULK)) { + if (usb_endpoint_is_bulk_in(ep)) { /* we found a bulk in endpoint */ if (upriv->read_urb != NULL) { - pr_warning("Found a second bulk in ep, ignored"); + pr_warn("Found a second bulk in ep, ignored\n"); continue; } @@ -1621,10 +1618,10 @@ static int ezusb_probe(struct usb_interface *interface, goto error; } if (le16_to_cpu(ep->wMaxPacketSize) != 64) - pr_warning("bulk in: wMaxPacketSize!= 64"); + pr_warn("bulk in: wMaxPacketSize!= 64\n"); if (ep->bEndpointAddress != (2 | USB_DIR_IN)) - pr_warning("bulk in: bEndpointAddress: %d", - ep->bEndpointAddress); + pr_warn("bulk in: bEndpointAddress: %d\n", + ep->bEndpointAddress); upriv->read_pipe = usb_rcvbulkpipe(udev, ep-> bEndpointAddress); @@ -1636,21 +1633,18 @@ static int ezusb_probe(struct usb_interface *interface, } } - if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) - == USB_DIR_OUT) && - ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) - == USB_ENDPOINT_XFER_BULK)) { + if (usb_endpoint_is_bulk_out(ep)) { /* we found a bulk out endpoint */ if (upriv->bap_buf != NULL) { - pr_warning("Found a second bulk out ep, ignored"); + pr_warn("Found a second bulk out ep, ignored\n"); continue; } if (le16_to_cpu(ep->wMaxPacketSize) != 64) - pr_warning("bulk out: wMaxPacketSize != 64"); + pr_warn("bulk out: wMaxPacketSize != 64\n"); if (ep->bEndpointAddress != 2) - pr_warning("bulk out: bEndpointAddress: %d", - ep->bEndpointAddress); + pr_warn("bulk out: bEndpointAddress: %d\n", + ep->bEndpointAddress); upriv->write_pipe = usb_sndbulkpipe(udev, ep-> bEndpointAddress); diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c index e175b9b8561b..2c66166add70 100644 --- a/drivers/net/wireless/orinoco/scan.c +++ b/drivers/net/wireless/orinoco/scan.c @@ -123,9 +123,10 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv, beacon_interval = le16_to_cpu(bss->a.beacon_interv); signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level)); - cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp, - capability, beacon_interval, ie_buf, ie_len, - signal, GFP_KERNEL); + cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, + bss->a.bssid, timestamp, capability, + beacon_interval, ie_buf, ie_len, signal, + GFP_KERNEL); cfg80211_put_bss(wiphy, cbss); } @@ -156,9 +157,10 @@ void orinoco_add_extscan_result(struct orinoco_private *priv, ie = bss->data; signal = SIGNAL_TO_MBM(bss->level); - cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp, - capability, beacon_interval, ie, ie_len, - signal, GFP_KERNEL); + cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, + bss->bssid, timestamp, capability, + beacon_interval, ie, ie_len, signal, + GFP_KERNEL); cfg80211_put_bss(wiphy, cbss); } diff --git a/drivers/net/wireless/ray_cs.h 
b/drivers/net/wireless/ray_cs.h index e79848fbcca1..524c2f02dd82 100644 --- a/drivers/net/wireless/ray_cs.h +++ b/drivers/net/wireless/ray_cs.h @@ -3,7 +3,8 @@ Written by Corey Thomas */ -#ifndef RAYLINK_H +#ifndef _RAY_CS_H_ +#define _RAY_CS_H_ struct beacon_rx { struct mac_header mac; @@ -69,4 +70,4 @@ typedef struct ray_dev_t { } ray_dev_t; /*****************************************************************************/ -#endif /* RAYLINK_H */ +#endif /* _RAY_CS_H_ */ diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h index 3c3b98b152c3..b21ed64e15df 100644 --- a/drivers/net/wireless/rayctl.h +++ b/drivers/net/wireless/rayctl.h @@ -1,4 +1,5 @@ -#ifndef RAYLINK_H +#ifndef _RAYCTL_H_ +#define _RAYCTL_H_ typedef unsigned char UCHAR; @@ -729,4 +730,4 @@ typedef struct snaphdr_t #define RAY_IPX_TYPE 0x8137 #define APPLEARP_TYPE 0x80f3 /*****************************************************************************/ -#endif /* #ifndef RAYLINK_H */ +#endif /* _RAYCTL_H_ */ diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index d2a9a08210be..1a4facd1fbf3 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2022,9 +2022,10 @@ static bool rndis_bss_info_update(struct usbnet *usbdev, capability = le16_to_cpu(fixed->capabilities); beacon_interval = le16_to_cpu(fixed->beacon_interval); - bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac, - timestamp, capability, beacon_interval, ie, ie_len, signal, - GFP_KERNEL); + bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, + CFG80211_BSS_FTYPE_UNKNOWN, bssid->mac, + timestamp, capability, beacon_interval, + ie, ie_len, signal, GFP_KERNEL); cfg80211_put_bss(priv->wdev.wiphy, bss); return (bss != NULL); @@ -2711,9 +2712,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid, bssid, (u32)timestamp, capability, beacon_period, ie_len, ssid.essid, signal); - bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid, - timestamp, capability, beacon_period, ie_buf, ie_len, - signal, GFP_KERNEL); + bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, + timestamp, capability, beacon_period, + ie_buf, ie_len, signal, GFP_KERNEL); cfg80211_put_bss(priv->wdev.wiphy, bss); } diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index a394a9a95919..b7434df51e7c 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -52,6 +52,7 @@ * RF5592 2.4G/5G 2T2R * RF3070 2.4G 1T1R * RF5360 2.4G 1T1R + * RF5362 2.4G 1T1R * RF5370 2.4G 1T1R * RF5390 2.4G 1T1R */ @@ -72,6 +73,7 @@ #define RF3070 0x3070 #define RF3290 0x3290 #define RF5360 0x5360 +#define RF5362 0x5362 #define RF5370 0x5370 #define RF5372 0x5372 #define RF5390 0x5390 @@ -2145,7 +2147,7 @@ struct mac_iveiv_entry { /* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */ #define RFCSR3_PA1_BIAS_CCK FIELD8(0x70) #define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80) -/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */ +/* Bits for RF3290/RF5360/RF5362/RF5370/RF5372/RF5390/RF5392 */ #define RFCSR3_VCOCAL_EN FIELD8(0x80) /* Bits for RF3050 */ #define RFCSR3_BIT1 FIELD8(0x02) diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 893c9d5f3d6f..9f57a2db791c 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -3186,6 +3186,7 @@ static void rt2800_config_channel(struct 
rt2x00_dev *rt2x00dev, break; case RF3070: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: @@ -3203,6 +3204,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2x00_rf(rt2x00dev, RF3290) || rt2x00_rf(rt2x00dev, RF3322) || rt2x00_rf(rt2x00dev, RF5360) || + rt2x00_rf(rt2x00dev, RF5362) || rt2x00_rf(rt2x00dev, RF5370) || rt2x00_rf(rt2x00dev, RF5372) || rt2x00_rf(rt2x00dev, RF5390) || @@ -4317,6 +4319,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) case RF3070: case RF3290: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: @@ -7095,6 +7098,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) case RF3320: case RF3322: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: @@ -7551,6 +7555,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF3320: case RF3322: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: @@ -7680,6 +7685,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) case RF3070: case RF3290: case RF5360: + case RF5362: case RF5370: case RF5372: case RF5390: diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c index a0aa8fa72392..735be5352143 100644 --- a/drivers/net/wireless/ti/wl1251/spi.c +++ b/drivers/net/wireless/ti/wl1251/spi.c @@ -345,7 +345,6 @@ static int wl1251_spi_remove(struct spi_device *spi) { struct wl1251 *wl = spi_get_drvdata(spi); - free_irq(wl->irq, wl); wl1251_free_hw(wl); regulator_disable(wl->vio); diff --git a/drivers/net/wireless/ti/wlcore/debug.h b/drivers/net/wireless/ti/wlcore/debug.h index 0420bd45e4ee..27bfb7c11e7b 100644 --- a/drivers/net/wireless/ti/wlcore/debug.h +++ b/drivers/net/wireless/ti/wlcore/debug.h @@ -65,7 +65,7 @@ extern u32 wl12xx_debug_level; pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg) #define wl1271_warning(fmt, arg...) \ - pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg) + pr_warn(DRIVER_PREFIX "WARNING " fmt "\n", ##arg) #define wl1271_notice(fmt, arg...) 
\ pr_info(DRIVER_PREFIX fmt "\n", ##arg) diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 392c882b28f0..69601f6741d9 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -327,23 +327,22 @@ static int wl1271_probe(struct spi_device *spi) struct wl12xx_spi_glue *glue; struct wlcore_platdev_data pdev_data; struct resource res[1]; - int ret = -ENOMEM; + int ret; memset(&pdev_data, 0x00, sizeof(pdev_data)); pdev_data.pdata = dev_get_platdata(&spi->dev); if (!pdev_data.pdata) { dev_err(&spi->dev, "no platform data\n"); - ret = -ENODEV; - goto out; + return -ENODEV; } pdev_data.if_ops = &spi_ops; - glue = kzalloc(sizeof(*glue), GFP_KERNEL); + glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&spi->dev, "can't allocate glue\n"); - goto out; + return -ENOMEM; } glue->dev = &spi->dev; @@ -357,14 +356,13 @@ static int wl1271_probe(struct spi_device *spi) ret = spi_setup(spi); if (ret < 0) { dev_err(glue->dev, "spi_setup failed\n"); - goto out_free_glue; + return ret; } glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO); if (!glue->core) { dev_err(glue->dev, "can't allocate platform_device\n"); - ret = -ENOMEM; - goto out_free_glue; + return -ENOMEM; } glue->core->dev.parent = &spi->dev; @@ -398,11 +396,6 @@ static int wl1271_probe(struct spi_device *spi) out_dev_put: platform_device_put(glue->core); - -out_free_glue: - kfree(glue); - -out: return ret; } @@ -411,7 +404,6 @@ static int wl1271_remove(struct spi_device *spi) struct wl12xx_spi_glue *glue = spi_get_drvdata(spi); platform_device_unregister(glue->core); - kfree(glue); return 0; } diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 401b2453da45..a85d80012993 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -224,6 +224,8 @@ struct phy_device *of_phy_connect(struct net_device *dev, if (!phy) return NULL; + phy->dev_flags = flags; + return phy_connect_direct(dev, phy, hndlr, iface) ? 
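The wl1271_probe() conversion above is the usual devm pattern: allocating the glue structure with devm_kzalloc() ties its lifetime to the SPI device, which is what lets both the out_free_glue unwind path and the kfree() in wl1271_remove() be deleted. The caller's error-handling obligation for that allocation collapses to:

	glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL);
	if (!glue)
		return -ENOMEM;	/* no goto: the driver core frees it on detach */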
NULL : phy; } EXPORT_SYMBOL(of_phy_connect); diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c index c5fdcb89dacd..2a1502f351f8 100644 --- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c +++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c @@ -2128,7 +2128,7 @@ static int on_action_public23a(struct rtw_adapter *padapter, IEEE80211_BAND_5GHZ); if (cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pframe, - skb->len, 0, GFP_ATOMIC)) + skb->len, 0)) return _SUCCESS; return _FAIL; diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c index 93dc844a10b3..8b0ccb5c5fc4 100644 --- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c @@ -279,6 +279,7 @@ static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter, } bss = cfg80211_inform_bss(wiphy, notify_channel, + CFG80211_BSS_FTYPE_UNKNOWN, pnetwork->network.MacAddress, pnetwork->network.tsf, pnetwork->network.capability, @@ -2379,7 +2380,7 @@ void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter, IEEE80211_BAND_5GHZ); cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pmgmt_frame, frame_len, - 0, GFP_ATOMIC); + 0); #endif /* defined(RTW_USE_CFG80211_STA_EVENT) */ } @@ -2425,7 +2426,7 @@ void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter, frame_len = sizeof(struct ieee80211_hdr_3addr) + 2; cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, (u8 *)&mgmt, frame_len, - 0, GFP_ATOMIC); + 0); #endif /* defined(RTW_USE_CFG80211_STA_EVENT) */ } diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 3727f6d25cf1..8942dcb44180 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -422,6 +422,7 @@ static int prism2_scan(struct wiphy *wiphy, IEEE80211_BAND_2GHZ); bss = cfg80211_inform_bss(wiphy, ieee80211_get_channel(wiphy, freq), + CFG80211_BSS_FTYPE_UNKNOWN, (const u8 *) &(msg2.bssid.data.data), msg2.timestamp.data, msg2.capinfo.data, msg2.beaconperiod.data, diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index bcdc882cd415..146f48cc65d7 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -1101,7 +1101,15 @@ static void ncm_tx_tasklet(unsigned long data) /* Only send if data is available. */ if (ncm->skb_tx_data) { ncm->timer_force_tx = true; + + /* XXX This allowance of a NULL skb argument to ndo_start_xmit + * XXX is not sane. The gadget layer should be redesigned so + * XXX that the dev->wrap() invocations to build SKBs is transparent + * XXX and performed in some way outside of the ndo_start_xmit + * XXX interface. + */ ncm->netdev->netdev_ops->ndo_start_xmit(NULL, ncm->netdev); + ncm->timer_force_tx = false; } } diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 4d08f45a9c29..3b1f89b6e743 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -99,36 +99,10 @@ struct vring_virtqueue #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) -static inline struct scatterlist *sg_next_chained(struct scatterlist *sg, - unsigned int *count) -{ - return sg_next(sg); -} - -static inline struct scatterlist *sg_next_arr(struct scatterlist *sg, - unsigned int *count) -{ - if (--(*count) == 0) - return NULL; - return sg + 1; -} - -/* Set up an indirect table of descriptors and add it to the queue. 
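The of_mdio change above is a one-line bug fix: of_phy_connect() previously dropped its flags argument on the floor, and now copies it into phy->dev_flags before connecting. A hypothetical MAC probe where the flags actually take effect (mydrv_adjust_link is an illustrative callback, not a real function):

	phydev = of_phy_connect(ndev, phy_np, mydrv_adjust_link,
				PHY_BRCM_CLEAR_RGMII_MODE,
				PHY_INTERFACE_MODE_RGMII);
	if (!phydev)
		return -ENODEV;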
*/ -static inline int vring_add_indirect(struct vring_virtqueue *vq, - struct scatterlist *sgs[], - struct scatterlist *(*next) - (struct scatterlist *, unsigned int *), - unsigned int total_sg, - unsigned int total_out, - unsigned int total_in, - unsigned int out_sgs, - unsigned int in_sgs, - gfp_t gfp) +static struct vring_desc *alloc_indirect(unsigned int total_sg, gfp_t gfp) { struct vring_desc *desc; - unsigned head; - struct scatterlist *sg; - int i, n; + unsigned int i; /* * We require lowmem mappings for the descriptors because @@ -139,57 +113,16 @@ static inline int vring_add_indirect(struct vring_virtqueue *vq, desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); if (!desc) - return -ENOMEM; - - /* Transfer entries from the sg lists into the indirect page */ - i = 0; - for (n = 0; n < out_sgs; n++) { - for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { - desc[i].flags = VRING_DESC_F_NEXT; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; - desc[i].next = i+1; - i++; - } - } - for (; n < (out_sgs + in_sgs); n++) { - for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { - desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; - desc[i].next = i+1; - i++; - } - } - BUG_ON(i != total_sg); - - /* Last one doesn't continue. */ - desc[i-1].flags &= ~VRING_DESC_F_NEXT; - desc[i-1].next = 0; - - /* We're about to use a buffer */ - vq->vq.num_free--; - - /* Use a single buffer which doesn't continue */ - head = vq->free_head; - vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; - vq->vring.desc[head].addr = virt_to_phys(desc); - /* kmemleak gives a false positive, as it's hidden by virt_to_phys */ - kmemleak_ignore(desc); - vq->vring.desc[head].len = i * sizeof(struct vring_desc); - - /* Update free pointer */ - vq->free_head = vq->vring.desc[head].next; + return NULL; - return head; + for (i = 0; i < total_sg; i++) + desc[i].next = i+1; + return desc; } static inline int virtqueue_add(struct virtqueue *_vq, struct scatterlist *sgs[], - struct scatterlist *(*next) - (struct scatterlist *, unsigned int *), - unsigned int total_out, - unsigned int total_in, + unsigned int total_sg, unsigned int out_sgs, unsigned int in_sgs, void *data, @@ -197,8 +130,10 @@ static inline int virtqueue_add(struct virtqueue *_vq, { struct vring_virtqueue *vq = to_vvq(_vq); struct scatterlist *sg; - unsigned int i, n, avail, uninitialized_var(prev), total_sg; + struct vring_desc *desc; + unsigned int i, n, avail, descs_used, uninitialized_var(prev); int head; + bool indirect; START_USE(vq); @@ -222,24 +157,40 @@ static inline int virtqueue_add(struct virtqueue *_vq, } #endif - total_sg = total_in + total_out; + BUG_ON(total_sg > vq->vring.num); + BUG_ON(total_sg == 0); + + head = vq->free_head; /* If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. 
FIXME: tune this threshold */ - if (vq->indirect && total_sg > 1 && vq->vq.num_free) { - head = vring_add_indirect(vq, sgs, next, total_sg, total_out, - total_in, - out_sgs, in_sgs, gfp); - if (likely(head >= 0)) - goto add_head; + if (vq->indirect && total_sg > 1 && vq->vq.num_free) + desc = alloc_indirect(total_sg, gfp); + else + desc = NULL; + + if (desc) { + /* Use a single buffer which doesn't continue */ + vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; + vq->vring.desc[head].addr = virt_to_phys(desc); + /* avoid kmemleak false positive (hidden by virt_to_phys) */ + kmemleak_ignore(desc); + vq->vring.desc[head].len = total_sg * sizeof(struct vring_desc); + + /* Set up rest to use this indirect table. */ + i = 0; + descs_used = 1; + indirect = true; + } else { + desc = vq->vring.desc; + i = head; + descs_used = total_sg; + indirect = false; } - BUG_ON(total_sg > vq->vring.num); - BUG_ON(total_sg == 0); - - if (vq->vq.num_free < total_sg) { + if (vq->vq.num_free < descs_used) { pr_debug("Can't add buf len %i - avail = %i\n", - total_sg, vq->vq.num_free); + descs_used, vq->vq.num_free); /* FIXME: for historical reasons, we force a notify here if * there are outgoing parts to the buffer. Presumably the * host should service the ring ASAP. */ @@ -250,34 +201,35 @@ static inline int virtqueue_add(struct virtqueue *_vq, } /* We're about to use some buffers from the free list. */ - vq->vq.num_free -= total_sg; + vq->vq.num_free -= descs_used; - head = i = vq->free_head; for (n = 0; n < out_sgs; n++) { - for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { - vq->vring.desc[i].flags = VRING_DESC_F_NEXT; - vq->vring.desc[i].addr = sg_phys(sg); - vq->vring.desc[i].len = sg->length; + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + desc[i].flags = VRING_DESC_F_NEXT; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; prev = i; - i = vq->vring.desc[i].next; + i = desc[i].next; } } for (; n < (out_sgs + in_sgs); n++) { - for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { - vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; - vq->vring.desc[i].addr = sg_phys(sg); - vq->vring.desc[i].len = sg->length; + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; prev = i; - i = vq->vring.desc[i].next; + i = desc[i].next; } } /* Last one doesn't continue. */ - vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; + desc[prev].flags &= ~VRING_DESC_F_NEXT; /* Update free pointer */ - vq->free_head = i; + if (indirect) + vq->free_head = vq->vring.desc[head].next; + else + vq->free_head = i; -add_head: /* Set token. */ vq->data[head] = data; @@ -324,29 +276,23 @@ int virtqueue_add_sgs(struct virtqueue *_vq, void *data, gfp_t gfp) { - unsigned int i, total_out, total_in; + unsigned int i, total_sg = 0; /* Count them first. */ - for (i = total_out = total_in = 0; i < out_sgs; i++) { - struct scatterlist *sg; - for (sg = sgs[i]; sg; sg = sg_next(sg)) - total_out++; - } - for (; i < out_sgs + in_sgs; i++) { + for (i = 0; i < out_sgs + in_sgs; i++) { struct scatterlist *sg; for (sg = sgs[i]; sg; sg = sg_next(sg)) - total_in++; + total_sg++; } - return virtqueue_add(_vq, sgs, sg_next_chained, - total_out, total_in, out_sgs, in_sgs, data, gfp); + return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_sgs); /** * virtqueue_add_outbuf - expose output buffers to other end * @vq: the struct virtqueue we're talking about. 
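virtqueue_add() now walks ordinary chained scatterlists via sg_next() for both the direct and indirect cases, which is why the sg_next_arr()/sg_next_chained() indirection could be dropped and why the kernel-doc below insists the lists be well-formed and terminated. A minimal caller sketch under those rules, with req_hdr, buf and token standing in for real driver state:

	struct scatterlist hdr_sg, data_sg;
	struct scatterlist *sgs[2] = { &hdr_sg, &data_sg };
	int err;

	sg_init_one(&hdr_sg, &req_hdr, sizeof(req_hdr));	/* device-readable */
	sg_init_one(&data_sg, buf, buf_len);			/* device-writable */

	err = virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vq);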
- * @sgs: array of scatterlists (need not be terminated!) - * @num: the number of scatterlists readable by other side + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg readable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * @@ -356,19 +302,19 @@ EXPORT_SYMBOL_GPL(virtqueue_add_sgs); * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_outbuf(struct virtqueue *vq, - struct scatterlist sg[], unsigned int num, + struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp); + return virtqueue_add(vq, &sg, num, 1, 0, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); /** * virtqueue_add_inbuf - expose input buffers to other end * @vq: the struct virtqueue we're talking about. - * @sgs: array of scatterlists (need not be terminated!) - * @num: the number of scatterlists writable by other side + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg writable by other side * @data: the token identifying the buffer. * @gfp: how to do memory allocations (if necessary). * @@ -378,11 +324,11 @@ EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). */ int virtqueue_add_inbuf(struct virtqueue *vq, - struct scatterlist sg[], unsigned int num, + struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) { - return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp); + return virtqueue_add(vq, &sg, num, 0, 1, data, gfp); } EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index 917dcd7965e7..e64ae7bf80a1 100644 --- a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -39,6 +39,11 @@ #define BCMA_RESET_CTL_RESET 0x0001 #define BCMA_RESET_ST 0x0804 +#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000 +#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001 +#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002 + /* BCMA PCI config space registers. */ #define BCMA_PCI_PMCSR 0x44 #define BCMA_PCI_PE 0x100 diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 61219b9b3445..3f626fe48c9a 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -13,6 +13,8 @@ #define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM57780 0x03625d90 +#define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7364 0xae025260 #define PHY_ID_BCM7366 0x600d8490 #define PHY_ID_BCM7439 0x600d8480 #define PHY_ID_BCM7445 0x600d8510 @@ -21,9 +23,9 @@ #define PHY_BCM_OUI_1 0x00206000 #define PHY_BCM_OUI_2 0x0143bc00 #define PHY_BCM_OUI_3 0x03625c00 -#define PHY_BCM_OUI_4 0x600d0000 +#define PHY_BCM_OUI_4 0x600d8400 #define PHY_BCM_OUI_5 0x03625e00 - +#define PHY_BCM_OUI_6 0xae025000 #define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 #define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 @@ -38,7 +40,8 @@ #define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 #define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 /* Broadcom BCM7xxx specific workarounds */ -#define PHY_BRCM_100MBPS_WAR 0x00010000 +#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff) +#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff) #define PHY_BCM_FLAGS_VALID 0x80000000 /* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */ @@ -92,4 +95,130 @@ #define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 +/* + * Broadcom LED source encodings. 
These are used in BCM5461, BCM5481, + * BCM5482, and possibly some others. + */ +#define BCM_LED_SRC_LINKSPD1 0x0 +#define BCM_LED_SRC_LINKSPD2 0x1 +#define BCM_LED_SRC_XMITLED 0x2 +#define BCM_LED_SRC_ACTIVITYLED 0x3 +#define BCM_LED_SRC_FDXLED 0x4 +#define BCM_LED_SRC_SLAVE 0x5 +#define BCM_LED_SRC_INTR 0x6 +#define BCM_LED_SRC_QUALITY 0x7 +#define BCM_LED_SRC_RCVLED 0x8 +#define BCM_LED_SRC_MULTICOLOR1 0xa +#define BCM_LED_SRC_OPENSHORT 0xb +#define BCM_LED_SRC_OFF 0xe /* Tied high */ +#define BCM_LED_SRC_ON 0xf /* Tied low */ + + +/* + * BCM5482: Shadow registers + * Shadow values go into bits [14:10] of register 0x1c to select a shadow + * register to access. + */ +/* 00101: Spare Control Register 3 */ +#define BCM54XX_SHD_SCR3 0x05 +#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001 +#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002 +#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004 + +/* 01010: Auto Power-Down */ +#define BCM54XX_SHD_APD 0x0a +#define BCM54XX_SHD_APD_EN 0x0020 + +#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ + /* LED3 / ~LINKSPD[2] selector */ +#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) + /* LED1 / ~LINKSPD[1] selector */ +#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) +#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */ +#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ +#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ +#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ +#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ +#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ + + +/* + * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) + */ +#define MII_BCM54XX_EXP_AADJ1CH0 0x001f +#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200 +#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100 +#define MII_BCM54XX_EXP_AADJ1CH3 0x601f +#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002 +#define MII_BCM54XX_EXP_EXP08 0x0F08 +#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001 +#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200 +#define MII_BCM54XX_EXP_EXP75 0x0f75 +#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c +#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001 +#define MII_BCM54XX_EXP_EXP96 0x0f96 +#define MII_BCM54XX_EXP_EXP96_MYST 0x0010 +#define MII_BCM54XX_EXP_EXP97 0x0f97 +#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c + +/* + * BCM5482: Secondary SerDes registers + */ +#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ +#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */ +#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */ +#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ +#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ + + +/*****************************************************************************/ +/* Fast Ethernet Transceiver definitions. 
*/ +/*****************************************************************************/ + +#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */ +#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */ +#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */ +#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */ +#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */ +#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */ + +#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */ +#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */ + + +/*** Shadow register definitions ***/ + +#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */ +#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */ + +#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */ +#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003 +#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001 + +#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */ +#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */ + +/* + * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T + * 0x1c shadow registers. + */ +static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow) +{ + phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow)); + return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD)); +} + +static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, + u16 val) +{ + return phy_write(phydev, MII_BCM54XX_SHD, + MII_BCM54XX_SHD_WRITE | + MII_BCM54XX_SHD_VAL(shadow) | + MII_BCM54XX_SHD_DATA(val)); +} + +#define BRCM_CL45VEN_EEE_CONTROL 0x803d +#define LPI_FEATURE_EN 0x8000 +#define LPI_FEATURE_EN_DIG1000X 0x4000 + #endif /* _LINUX_BRCMPHY_H */ diff --git a/include/linux/cycx_x25.h b/include/linux/cycx_x25.h deleted file mode 100644 index 362bf19d6cf1..000000000000 --- a/include/linux/cycx_x25.h +++ /dev/null @@ -1,125 +0,0 @@ -#ifndef _CYCX_X25_H -#define _CYCX_X25_H -/* -* cycx_x25.h Cyclom X.25 firmware API definitions. -* -* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br> -* -* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo -* -* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com> -* -* This program is free software; you can redistribute it and/or -* modify it under the terms of the GNU General Public License -* as published by the Free Software Foundation; either version -* 2 of the License, or (at your option) any later version. -* ============================================================================ -* 2000/04/02 acme dprintk and cycx_debug -* 1999/01/03 acme judicious use of data types -* 1999/01/02 acme #define X25_ACK_N3 0x4411 -* 1998/12/28 acme cleanup: lot'o'things removed -* commands listed, -* TX25Cmd & TX25Config structs -* typedef'ed -*/ -#ifndef PACKED -#define PACKED __attribute__((packed)) -#endif - -/* X.25 shared memory layout. */ -#define X25_MBOX_OFFS 0x300 /* general mailbox block */ -#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */ - -/* Debug */ -#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a) - -extern unsigned int cycx_debug; - -/* Data Structures */ -/* X.25 Command Block. */ -struct cycx_x25_cmd { - u16 command; - u16 link; /* values: 0 or 1 */ - u16 len; /* values: 0 thru 0x205 (517) */ - u32 buf; -} PACKED; - -/* Defines for the 'command' field. 
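The new bcm54xx_shadow_read()/bcm54xx_shadow_write() helpers above hide the select-then-access dance on shadow register 0x1c. A hypothetical read-modify-write using the auto power-down definitions introduced earlier in this header:

	static void bcm54xx_apd_enable(struct phy_device *phydev)
	{
		int val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);

		bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD,
				     val | BCM54XX_SHD_APD_EN);
	}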
*/ -#define X25_CONNECT_REQUEST 0x4401 -#define X25_CONNECT_RESPONSE 0x4402 -#define X25_DISCONNECT_REQUEST 0x4403 -#define X25_DISCONNECT_RESPONSE 0x4404 -#define X25_DATA_REQUEST 0x4405 -#define X25_ACK_TO_VC 0x4406 -#define X25_INTERRUPT_RESPONSE 0x4407 -#define X25_CONFIG 0x4408 -#define X25_CONNECT_INDICATION 0x4409 -#define X25_CONNECT_CONFIRM 0x440A -#define X25_DISCONNECT_INDICATION 0x440B -#define X25_DISCONNECT_CONFIRM 0x440C -#define X25_DATA_INDICATION 0x440E -#define X25_INTERRUPT_INDICATION 0x440F -#define X25_ACK_FROM_VC 0x4410 -#define X25_ACK_N3 0x4411 -#define X25_CONNECT_COLLISION 0x4413 -#define X25_N3WIN 0x4414 -#define X25_LINE_ON 0x4415 -#define X25_LINE_OFF 0x4416 -#define X25_RESET_REQUEST 0x4417 -#define X25_LOG 0x4500 -#define X25_STATISTIC 0x4600 -#define X25_TRACE 0x4700 -#define X25_N2TRACEXC 0x4702 -#define X25_N3TRACEXC 0x4703 - -/** - * struct cycx_x25_config - cyclom2x x25 firmware configuration - * @link - link number - * @speed - line speed - * @clock - internal/external - * @n2 - # of level 2 retransm.(values: 1 thru FF) - * @n2win - level 2 window (values: 1 thru 7) - * @n3win - level 3 window (values: 1 thru 7) - * @nvc - # of logical channels (values: 1 thru 64) - * @pktlen - level 3 packet length - log base 2 of size - * @locaddr - my address - * @remaddr - remote address - * @t1 - time, in seconds - * @t2 - time, in seconds - * @t21 - time, in seconds - * @npvc - # of permanent virt. circuits (1 thru nvc) - * @t23 - time, in seconds - * @flags - see dosx25.doc, in portuguese, for details - */ -struct cycx_x25_config { - u8 link; - u8 speed; - u8 clock; - u8 n2; - u8 n2win; - u8 n3win; - u8 nvc; - u8 pktlen; - u8 locaddr; - u8 remaddr; - u16 t1; - u16 t2; - u8 t21; - u8 npvc; - u8 t23; - u8 flags; -} PACKED; - -struct cycx_x25_stats { - u16 rx_crc_errors; - u16 rx_over_errors; - u16 n2_tx_frames; - u16 n2_rx_frames; - u16 tx_timeouts; - u16 rx_timeouts; - u16 n3_tx_packets; - u16 n3_rx_packets; - u16 tx_aborts; - u16 rx_aborts; -} PACKED; -#endif /* _CYCX_X25_H */ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 9c5529dc6d07..733980fce8e3 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -29,6 +29,7 @@ #include <asm/bitsperlong.h> #ifdef __KERNEL__ +u32 eth_get_headlen(void *data, unsigned int max_len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index e658229fee39..c1a2d60dfb82 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -257,6 +257,10 @@ struct ethtool_ops { struct ethtool_eeprom *, u8 *); int (*get_eee)(struct net_device *, struct ethtool_eee *); int (*set_eee)(struct net_device *, struct ethtool_eee *); + int (*get_tunable)(struct net_device *, + const struct ethtool_tunable *, void *); + int (*set_tunable)(struct net_device *, + const struct ethtool_tunable *, const void *); }; diff --git a/include/linux/filter.h b/include/linux/filter.h index a5227ab8ccb1..1a0bc6d134d7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -4,58 +4,23 @@ #ifndef __LINUX_FILTER_H__ #define __LINUX_FILTER_H__ +#include <stdarg.h> + #include <linux/atomic.h> #include <linux/compat.h> #include <linux/skbuff.h> +#include <linux/linkage.h> +#include <linux/printk.h> #include <linux/workqueue.h> -#include <uapi/linux/filter.h> -/* Internally used and optimized filter representation with extended - * instruction set based on top of classic 
BPF. - */ +#include <asm/cacheflush.h> -/* instruction classes */ -#define BPF_ALU64 0x07 /* alu mode in double word width */ - -/* ld/ldx fields */ -#define BPF_DW 0x18 /* double word */ -#define BPF_XADD 0xc0 /* exclusive add */ - -/* alu/jmp fields */ -#define BPF_MOV 0xb0 /* mov reg to reg */ -#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ - -/* change endianness of a register */ -#define BPF_END 0xd0 /* flags for endianness conversion: */ -#define BPF_TO_LE 0x00 /* convert to little-endian */ -#define BPF_TO_BE 0x08 /* convert to big-endian */ -#define BPF_FROM_LE BPF_TO_LE -#define BPF_FROM_BE BPF_TO_BE - -#define BPF_JNE 0x50 /* jump != */ -#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ -#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ -#define BPF_CALL 0x80 /* function call */ -#define BPF_EXIT 0x90 /* function return */ - -/* Register numbers */ -enum { - BPF_REG_0 = 0, - BPF_REG_1, - BPF_REG_2, - BPF_REG_3, - BPF_REG_4, - BPF_REG_5, - BPF_REG_6, - BPF_REG_7, - BPF_REG_8, - BPF_REG_9, - BPF_REG_10, - __MAX_BPF_REG, -}; +#include <uapi/linux/filter.h> +#include <uapi/linux/bpf.h> -/* BPF has 10 general purpose 64-bit registers and stack frame. */ -#define MAX_BPF_REG __MAX_BPF_REG +struct sk_buff; +struct sock; +struct seccomp_data; /* ArgX, context and stack frame pointer register positions. Note, * Arg1, Arg2, Arg3, etc are used as argument mappings of function @@ -161,6 +126,24 @@ enum { .off = 0, \ .imm = IMM }) +/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ +#define BPF_LD_IMM64(DST, IMM) \ + BPF_LD_IMM64_RAW(DST, 0, IMM) + +#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_DW | BPF_IMM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = (__u32) (IMM) }), \ + ((struct bpf_insn) { \ + .code = 0, /* zero is reserved opcode */ \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = ((__u64) (IMM)) >> 32 }) + /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ @@ -299,14 +282,6 @@ enum { #define SK_RUN_FILTER(filter, ctx) \ (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) -struct bpf_insn { - __u8 code; /* opcode */ - __u8 dst_reg:4; /* dest register */ - __u8 src_reg:4; /* source register */ - __s16 off; /* signed offset */ - __s32 imm; /* signed immediate constant */ -}; - #ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. */ struct compat_sock_fprog { @@ -320,20 +295,28 @@ struct sock_fprog_kern { struct sock_filter *filter; }; -struct sk_buff; -struct sock; -struct seccomp_data; +struct bpf_binary_header { + unsigned int pages; + u8 image[]; +}; + +struct bpf_work_struct { + struct bpf_prog *prog; + struct work_struct work; +}; struct bpf_prog { - u32 jited:1, /* Is our filter JIT'ed? */ - len:31; /* Number of filter blocks */ + u16 pages; /* Number of allocated pages */ + bool jited; /* Is our filter JIT'ed? 
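Note that BPF_LD_IMM64() deliberately expands to two struct bpf_insn initializers, the second carrying the upper 32 bits under the reserved zero opcode, so it drops straight into an instruction array alongside the other insn macros from this header (BPF_EXIT_INSN() assumed available from the same macro family):

	struct bpf_insn prog[] = {
		BPF_LD_IMM64(BPF_REG_0, 0x1122334455667788ULL),
		BPF_EXIT_INSN(),	/* return the full 64-bit immediate */
	};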
*/ + u32 len; /* Number of filter blocks */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ + struct bpf_work_struct *work; /* Deferred free work struct */ unsigned int (*bpf_func)(const struct sk_buff *skb, const struct bpf_insn *filter); + /* Instructions for interpreter */ union { struct sock_filter insns[0]; struct bpf_insn insnsi[0]; - struct work_struct work; }; }; @@ -353,6 +336,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) +#ifdef CONFIG_DEBUG_SET_MODULE_RONX +static inline void bpf_prog_lock_ro(struct bpf_prog *fp) +{ + set_memory_ro((unsigned long)fp, fp->pages); +} + +static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) +{ + set_memory_rw((unsigned long)fp, fp->pages); +} +#else +static inline void bpf_prog_lock_ro(struct bpf_prog *fp) +{ +} + +static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) +{ +} +#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ + int sk_filter(struct sock *sk, struct sk_buff *skb); void bpf_prog_select_runtime(struct bpf_prog *fp); @@ -361,6 +364,17 @@ void bpf_prog_free(struct bpf_prog *fp); int bpf_convert_filter(struct sock_filter *prog, int len, struct bpf_insn *new_prog, int *new_len); +struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); +struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, + gfp_t gfp_extra_flags); +void __bpf_prog_free(struct bpf_prog *fp); + +static inline void bpf_prog_unlock_free(struct bpf_prog *fp) +{ + bpf_prog_unlock_ro(fp); + __bpf_prog_free(fp); +} + int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); void bpf_prog_destroy(struct bpf_prog *fp); @@ -377,6 +391,38 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); void bpf_int_jit_compile(struct bpf_prog *fp); +#ifdef CONFIG_BPF_JIT +typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); + +struct bpf_binary_header * +bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, + unsigned int alignment, + bpf_jit_fill_hole_t bpf_fill_ill_insns); +void bpf_jit_binary_free(struct bpf_binary_header *hdr); + +void bpf_jit_compile(struct bpf_prog *fp); +void bpf_jit_free(struct bpf_prog *fp); + +static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, + u32 pass, void *image) +{ + pr_err("flen=%u proglen=%u pass=%u image=%pK\n", + flen, proglen, pass, image); + if (image) + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, + 16, 1, image, proglen, false); +} +#else +static inline void bpf_jit_compile(struct bpf_prog *fp) +{ +} + +static inline void bpf_jit_free(struct bpf_prog *fp) +{ + bpf_prog_unlock_free(fp); +} +#endif /* CONFIG_BPF_JIT */ + #define BPF_ANC BIT(15) static inline u16 bpf_anc_helper(const struct sock_filter *ftest) @@ -424,36 +470,6 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, return bpf_internal_load_pointer_neg_helper(skb, k, size); } -#ifdef CONFIG_BPF_JIT -#include <stdarg.h> -#include <linux/linkage.h> -#include <linux/printk.h> - -void bpf_jit_compile(struct bpf_prog *fp); -void bpf_jit_free(struct bpf_prog *fp); - -static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, - u32 pass, void *image) -{ - pr_err("flen=%u proglen=%u pass=%u image=%pK\n", - flen, proglen, pass, image); - if (image) - print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, - 16, 1, image, proglen, false); -} -#else -#include 
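bpf_prog_unlock_free() pairs the page-permission flip with the actual release, and the CONFIG_DEBUG_SET_MODULE_RONX stubs keep callers unconditional either way. Roughly, and glossing over the fill and runtime-selection steps, a program allocation now lives through:

	fp = bpf_prog_alloc(bpf_prog_size(proglen), 0);
	if (!fp)
		return -ENOMEM;
	/* ... populate fp->insnsi, then bpf_prog_select_runtime(fp) ... */

	bpf_prog_unlock_free(fp);	/* set_memory_rw() first, then __bpf_prog_free() */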
<linux/slab.h> - -static inline void bpf_jit_compile(struct bpf_prog *fp) -{ -} - -static inline void bpf_jit_free(struct bpf_prog *fp) -{ - kfree(fp); -} -#endif /* CONFIG_BPF_JIT */ - static inline int bpf_tell_extensions(void) { return SKF_AD_MAX; diff --git a/include/linux/i82593.h b/include/linux/i82593.h deleted file mode 100644 index afac5c7a323d..000000000000 --- a/include/linux/i82593.h +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Definitions for Intel 82593 CSMA/CD Core LAN Controller - * The definitions are taken from the 1992 users manual with Intel - * order number 297125-001. - * - * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp - * - * Copyright 1994, Anders Klemets <klemets@it.kth.se> - * - * HISTORY - * i82593.h,v - * Revision 1.4 2005/11/4 09:15:00 baroniunas - * Modified copyright with permission of author as follows: - * - * "If I82539.H is the only file with my copyright statement - * that is included in the Source Forge project, then you have - * my approval to change the copyright statement to be a GPL - * license, in the way you proposed on October 10." - * - * Revision 1.1 1996/07/17 15:23:12 root - * Initial revision - * - * Revision 1.3 1995/04/05 15:13:58 adj - * Initial alpha release - * - * Revision 1.2 1994/06/16 23:57:31 klemets - * Mirrored all the fields in the configuration block. - * - * Revision 1.1 1994/06/02 20:25:34 klemets - * Initial revision - * - * - */ -#ifndef _I82593_H -#define _I82593_H - -/* Intel 82593 CSMA/CD Core LAN Controller */ - -/* Port 0 Command Register definitions */ - -/* Execution operations */ -#define OP0_NOP 0 /* CHNL = 0 */ -#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */ -#define OP0_IA_SETUP 1 -#define OP0_CONFIGURE 2 -#define OP0_MC_SETUP 3 -#define OP0_TRANSMIT 4 -#define OP0_TDR 5 -#define OP0_DUMP 6 -#define OP0_DIAGNOSE 7 -#define OP0_TRANSMIT_NO_CRC 9 -#define OP0_RETRANSMIT 12 -#define OP0_ABORT 13 -/* Reception operations */ -#define OP0_RCV_ENABLE 8 -#define OP0_RCV_DISABLE 10 -#define OP0_STOP_RCV 11 -/* Status pointer control operations */ -#define OP0_FIX_PTR 15 /* CHNL = 1 */ -#define OP0_RLS_PTR 15 /* CHNL = 0 */ -#define OP0_RESET 14 - -#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */ -#define CR0_STATUS_0 0x00 -#define CR0_STATUS_1 0x20 -#define CR0_STATUS_2 0x40 -#define CR0_STATUS_3 0x60 -#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */ - -/* Port 0 Status Register definitions */ - -#define SR0_NO_RESULT 0 /* dummy */ -#define SR0_EVENT_MASK 0x0f -#define SR0_IA_SETUP_DONE 1 -#define SR0_CONFIGURE_DONE 2 -#define SR0_MC_SETUP_DONE 3 -#define SR0_TRANSMIT_DONE 4 -#define SR0_TDR_DONE 5 -#define SR0_DUMP_DONE 6 -#define SR0_DIAGNOSE_PASSED 7 -#define SR0_TRANSMIT_NO_CRC_DONE 9 -#define SR0_RETRANSMIT_DONE 12 -#define SR0_EXECUTION_ABORTED 13 -#define SR0_END_OF_FRAME 8 -#define SR0_RECEPTION_ABORTED 10 -#define SR0_DIAGNOSE_FAILED 15 -#define SR0_STOP_REG_HIT 11 - -#define SR0_CHNL (1 << 4) -#define SR0_EXECUTION (1 << 5) -#define SR0_RECEPTION (1 << 6) -#define SR0_INTERRUPT (1 << 7) -#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION) - -#define SR3_EXEC_STATE_MASK 0x03 -#define SR3_EXEC_IDLE 0 -#define SR3_TX_ABORT_IN_PROGRESS 1 -#define SR3_EXEC_ACTIVE 2 -#define SR3_ABORT_IN_PROGRESS 3 -#define SR3_EXEC_CHNL (1 << 2) -#define SR3_STP_ON_NO_RSRC (1 << 3) -#define SR3_RCVING_NO_RSRC (1 << 4) -#define SR3_RCV_STATE_MASK 0x60 -#define SR3_RCV_IDLE 0x00 -#define SR3_RCV_READY 0x20 -#define SR3_RCV_ACTIVE 0x40 -#define SR3_RCV_STOP_IN_PROG 0x60 -#define SR3_RCV_CHNL (1 << 7) - -/* 
Port 1 Command Register definitions */ - -#define OP1_NOP 0 -#define OP1_SWIT_TO_PORT_0 1 -#define OP1_INT_DISABLE 2 -#define OP1_INT_ENABLE 3 -#define OP1_SET_TS 5 -#define OP1_RST_TS 7 -#define OP1_POWER_DOWN 8 -#define OP1_RESET_RING_MNGMT 11 -#define OP1_RESET 14 -#define OP1_SEL_RST 15 - -#define CR1_STATUS_4 0x00 -#define CR1_STATUS_5 0x20 -#define CR1_STATUS_6 0x40 -#define CR1_STOP_REG_UPDATE (1 << 7) - -/* Receive frame status bits */ - -#define RX_RCLD (1 << 0) -#define RX_IA_MATCH (1 << 1) -#define RX_NO_AD_MATCH (1 << 2) -#define RX_NO_SFD (1 << 3) -#define RX_SRT_FRM (1 << 7) -#define RX_OVRRUN (1 << 8) -#define RX_ALG_ERR (1 << 10) -#define RX_CRC_ERR (1 << 11) -#define RX_LEN_ERR (1 << 12) -#define RX_RCV_OK (1 << 13) -#define RX_TYP_LEN (1 << 15) - -/* Transmit status bits */ - -#define TX_NCOL_MASK 0x0f -#define TX_FRTL (1 << 4) -#define TX_MAX_COL (1 << 5) -#define TX_HRT_BEAT (1 << 6) -#define TX_DEFER (1 << 7) -#define TX_UND_RUN (1 << 8) -#define TX_LOST_CTS (1 << 9) -#define TX_LOST_CRS (1 << 10) -#define TX_LTCOL (1 << 11) -#define TX_OK (1 << 13) -#define TX_COLL (1 << 15) - -struct i82593_conf_block { - u_char fifo_limit : 4, - forgnesi : 1, - fifo_32 : 1, - d6mod : 1, - throttle_enb : 1; - u_char throttle : 6, - cntrxint : 1, - contin : 1; - u_char addr_len : 3, - acloc : 1, - preamb_len : 2, - loopback : 2; - u_char lin_prio : 3, - tbofstop : 1, - exp_prio : 3, - bof_met : 1; - u_char : 4, - ifrm_spc : 4; - u_char : 5, - slottim_low : 3; - u_char slottim_hi : 3, - : 1, - max_retr : 4; - u_char prmisc : 1, - bc_dis : 1, - : 1, - crs_1 : 1, - nocrc_ins : 1, - crc_1632 : 1, - : 1, - crs_cdt : 1; - u_char cs_filter : 3, - crs_src : 1, - cd_filter : 3, - : 1; - u_char : 2, - min_fr_len : 6; - u_char lng_typ : 1, - lng_fld : 1, - rxcrc_xf : 1, - artx : 1, - sarec : 1, - tx_jabber : 1, /* why is this called max_len in the manual? 
*/ - hash_1 : 1, - lbpkpol : 1; - u_char : 6, - fdx : 1, - : 1; - u_char dummy_6 : 6, /* supposed to be ones */ - mult_ia : 1, - dis_bof : 1; - u_char dummy_1 : 1, /* supposed to be one */ - tx_ifs_retrig : 2, - mc_all : 1, - rcv_mon : 2, - frag_acpt : 1, - tstrttrs : 1; - u_char fretx : 1, - runt_eop : 1, - hw_sw_pin : 1, - big_endn : 1, - syncrqs : 1, - sttlen : 1, - tx_eop : 1, - rx_eop : 1; - u_char rbuf_size : 5, - rcvstop : 1, - : 2; -}; - -#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */ - -#endif /* _I82593_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 63ab3873c5ed..8018c915ee63 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -838,6 +838,16 @@ enum ieee80211_vht_opmode_bits { #define WLAN_SA_QUERY_TR_ID_LEN 2 +/** + * struct ieee80211_tpc_report_ie + * + * This structure refers to "TPC Report element" + */ +struct ieee80211_tpc_report_ie { + u8 tx_power; + u8 link_margin; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -973,6 +983,13 @@ struct ieee80211_mgmt { u8 action_code; u8 operating_mode; } __packed vht_opmode_notif; + struct { + u8 action_code; + u8 dialog_token; + u8 tpc_elem_id; + u8 tpc_elem_length; + struct ieee80211_tpc_report_ie tpc; + } __packed tpc_report; } u; } __packed action; } u; @@ -1865,6 +1882,7 @@ enum ieee80211_category { WLAN_CATEGORY_DLS = 2, WLAN_CATEGORY_BACK = 3, WLAN_CATEGORY_PUBLIC = 4, + WLAN_CATEGORY_RADIO_MEASUREMENT = 5, WLAN_CATEGORY_HT = 7, WLAN_CATEGORY_SA_QUERY = 8, WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, @@ -2378,4 +2396,51 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) +/** + * ieee80211_action_contains_tpc - checks if the frame contains TPC element + * @skb: the skb containing the frame, length will be checked + * + * This function checks if it's either TPC report action frame or Link + * Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5 + * and 8.5.7.5 accordingly. + */ +static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (!ieee80211_is_action(mgmt->frame_control)) + return false; + + if (skb->len < IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.tpc_report)) + return false; + + /* + * TPC report - check that: + * category = 0 (Spectrum Management) or 5 (Radio Measurement) + * spectrum management action = 3 (TPC/Link Measurement report) + * TPC report EID = 35 + * TPC report element length = 2 + * + * The spectrum management's tpc_report struct is used here both for + * parsing tpc_report and radio measurement's link measurement report + * frame, since the relevant part is identical in both frames. 
+ */ + if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT && + mgmt->u.action.category != WLAN_CATEGORY_RADIO_MEASUREMENT) + return false; + + /* both spectrum mgmt and link measurement have same action code */ + if (mgmt->u.action.u.tpc_report.action_code != + WLAN_ACTION_SPCT_TPC_RPRT) + return false; + + if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT || + mgmt->u.action.u.tpc_report.tpc_elem_length != + sizeof(struct ieee80211_tpc_report_ie)) + return false; + + return true; +} + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/igmp.h b/include/linux/igmp.h index f47550d75f85..2c677afeea47 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -39,6 +39,7 @@ static inline struct igmpv3_query * extern int sysctl_igmp_max_memberships; extern int sysctl_igmp_max_msf; +extern int sysctl_igmp_qrv; struct ip_sf_socklist { unsigned int sl_max; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a5b7d7cfcedf..03b5608a4329 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -38,6 +38,7 @@ #include <linux/completion.h> #include <linux/radix-tree.h> #include <linux/cpu_rmap.h> +#include <linux/crash_dump.h> #include <linux/atomic.h> @@ -184,19 +185,24 @@ enum { MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9, MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, + MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12, + MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13 }; enum { MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0, - MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1 + MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1, + MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2, + MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3 }; enum { - MLX4_USER_DEV_CAP_64B_CQE = 1L << 0 + MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0 }; enum { - MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0 + MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0, + MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1 }; @@ -1279,7 +1285,7 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, /* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) { - return reset_devices; + return is_kdump_kernel(); } #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c8e388e5fccc..4354b4307e37 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -543,7 +543,7 @@ struct netdev_queue { * read mostly part */ struct net_device *dev; - struct Qdisc *qdisc; + struct Qdisc __rcu *qdisc; struct Qdisc *qdisc_sleeping; #ifdef CONFIG_SYSFS struct kobject kobj; @@ -1747,6 +1747,12 @@ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, return &dev->_tx[index]; } +static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, + const struct sk_buff *skb) +{ + return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); +} + static inline void netdev_for_each_tx_queue(struct net_device *dev, void (*f)(struct net_device *, struct netdev_queue *, @@ -1781,24 +1787,13 @@ void dev_net_set(struct net_device *dev, struct net *net) #endif } -static inline bool netdev_uses_dsa_tags(struct net_device *dev) -{ -#ifdef CONFIG_NET_DSA_TAG_DSA - if (dev->dsa_ptr != NULL) - return dsa_uses_dsa_tags(dev->dsa_ptr); -#endif - - return 0; -} - -static inline bool netdev_uses_trailer_tags(struct net_device *dev) +static inline bool netdev_uses_dsa(struct net_device *dev) { -#ifdef CONFIG_NET_DSA_TAG_TRAILER +#if IS_ENABLED(CONFIG_NET_DSA) if (dev->dsa_ptr != NULL) - return 
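ieee80211_action_contains_tpc() gives drivers a single length- and content-checked test before they touch the TPC fields. A hypothetical RX-path use:

if (ieee80211_action_contains_tpc(skb)) {
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	u8 tx_power = mgmt->u.action.u.tpc_report.tpc.tx_power;
	u8 link_margin = mgmt->u.action.u.tpc_report.tpc.link_margin;

	/* feed tx_power/link_margin into the driver's TX power logic */
}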
dsa_uses_trailer_tags(dev->dsa_ptr); + return dsa_uses_tagged_protocol(dev->dsa_ptr); #endif - - return 0; + return false; } /** @@ -1879,11 +1874,17 @@ struct napi_gro_cb { /* jiffies when first packet was created/queued */ unsigned long age; - /* Used in ipv6_gro_receive() */ + /* Used in ipv6_gro_receive() and foo-over-udp */ u16 proto; /* Used in udp_gro_receive */ - u16 udp_mark; + u8 udp_mark:1; + + /* GRO checksum is valid */ + u8 csum_valid:1; + + /* Number of checksums via CHECKSUM_UNNECESSARY */ + u8 csum_cnt:3; /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; @@ -1924,6 +1925,7 @@ struct packet_offload { struct udp_offload { __be16 port; + u8 ipproto; struct offload_callbacks callbacks; }; @@ -1982,6 +1984,7 @@ struct pcpu_sw_netstats { #define NETDEV_CHANGEUPPER 0x0015 #define NETDEV_RESEND_IGMP 0x0016 #define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ +#define NETDEV_CHANGEINFODATA 0x0018 int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@ -2074,8 +2077,8 @@ void __dev_remove_pack(struct packet_type *pt); void dev_add_offload(struct packet_offload *po); void dev_remove_offload(struct packet_offload *po); -struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, - unsigned short mask); +struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, + unsigned short mask); struct net_device *dev_get_by_name(struct net *net, const char *name); struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct net_device *__dev_get_by_name(struct net *net, const char *name); @@ -2153,11 +2156,97 @@ static inline void *skb_gro_network_header(struct sk_buff *skb) static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len) { - if (skb->ip_summed == CHECKSUM_COMPLETE) + if (NAPI_GRO_CB(skb)->csum_valid) NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, csum_partial(start, len, 0)); } +/* GRO checksum functions. These are logical equivalents of the normal + * checksum functions (in skbuff.h) except that they operate on the GRO + * offsets and fields in sk_buff. + */ + +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb); + +static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, + bool zero_okay, + __sum16 check) +{ + return (skb->ip_summed != CHECKSUM_PARTIAL && + NAPI_GRO_CB(skb)->csum_cnt == 0 && + (!zero_okay || check)); +} + +static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb, + __wsum psum) +{ + if (NAPI_GRO_CB(skb)->csum_valid && + !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) + return 0; + + NAPI_GRO_CB(skb)->csum = psum; + + return __skb_gro_checksum_complete(skb); +} + +static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) +{ + if (NAPI_GRO_CB(skb)->csum_cnt > 0) { + /* Consume a checksum from CHECKSUM_UNNECESSARY */ + NAPI_GRO_CB(skb)->csum_cnt--; + } else { + /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we + * verified a new top level checksum or an encapsulated one + * during GRO. This saves work if we fallback to normal path. 
+ */ + __skb_incr_checksum_unnecessary(skb); + } +} + +#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \ + compute_pseudo) \ +({ \ + __sum16 __ret = 0; \ + if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ + __ret = __skb_gro_checksum_validate_complete(skb, \ + compute_pseudo(skb, proto)); \ + if (__ret) \ + __skb_mark_checksum_bad(skb); \ + else \ + skb_gro_incr_csum_unnecessary(skb); \ + __ret; \ +}) + +#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \ + __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo) + +#define skb_gro_checksum_validate_zero_check(skb, proto, check, \ + compute_pseudo) \ + __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo) + +#define skb_gro_checksum_simple_validate(skb) \ + __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo) + +static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) +{ + return (NAPI_GRO_CB(skb)->csum_cnt == 0 && + !NAPI_GRO_CB(skb)->csum_valid); +} + +static inline void __skb_gro_checksum_convert(struct sk_buff *skb, + __sum16 check, __wsum pseudo) +{ + NAPI_GRO_CB(skb)->csum = ~pseudo; + NAPI_GRO_CB(skb)->csum_valid = 1; +} + +#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ +do { \ + if (__skb_gro_checksum_convert_check(skb)) \ + __skb_gro_checksum_convert(skb, check, \ + compute_pseudo(skb, proto)); \ +} while (0) + static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, @@ -2261,12 +2350,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd, DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); void __netif_schedule(struct Qdisc *q); - -static inline void netif_schedule_queue(struct netdev_queue *txq) -{ - if (!(txq->state & QUEUE_STATE_ANY_XOFF)) - __netif_schedule(txq->qdisc); -} +void netif_schedule_queue(struct netdev_queue *txq); static inline void netif_tx_schedule_all(struct net_device *dev) { @@ -2302,11 +2386,7 @@ static inline void netif_tx_start_all_queues(struct net_device *dev) } } -static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) -{ - if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) - __netif_schedule(dev_queue->qdisc); -} +void netif_tx_wake_queue(struct netdev_queue *dev_queue); /** * netif_wake_queue - restart transmit @@ -2578,19 +2658,7 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev, return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); } -/** - * netif_wake_subqueue - allow sending packets on subqueue - * @dev: network device - * @queue_index: sub queue index - * - * Resume individual transmit queue of a device with multiple transmit queues. 
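Putting the new GRO helpers together, a UDP gro_receive handler could validate or convert the checksum roughly as below; inet_gro_compute_pseudo() is the IPv4 pseudo-header helper added to net/ip.h further down in this diff, and 'uh' is assumed to already point at the UDP header:

if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
					 inet_gro_compute_pseudo))
	goto flush;		/* bad checksum: don't aggregate */

/* a valid non-zero checksum may be converted for inner parsing */
skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
			     inet_gro_compute_pseudo);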
- */ -static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) -{ - struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); - if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) - __netif_schedule(txq->qdisc); -} +void netif_wake_subqueue(struct net_device *dev, u16 queue_index); #ifdef CONFIG_XPS int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, @@ -2754,8 +2822,9 @@ int dev_set_mac_address(struct net_device *, struct sockaddr *); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_port_id *ppid); -int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, - struct netdev_queue *txq); +struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev); +struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, int *ret); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); @@ -3357,6 +3426,27 @@ int __init dev_proc_init(void); #define dev_proc_init() 0 #endif +static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, + struct sk_buff *skb, struct net_device *dev, + bool more) +{ + skb->xmit_more = more ? 1 : 0; + return ops->ndo_start_xmit(skb, dev); +} + +static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, bool more) +{ + const struct net_device_ops *ops = dev->netdev_ops; + int rc; + + rc = __netdev_start_xmit(ops, skb, dev, more); + if (rc == NETDEV_TX_OK) + txq_trans_update(txq); + + return rc; +} + int netdev_class_create_file_ns(struct class_attribute *class_attr, const void *ns); void netdev_class_remove_file_ns(struct class_attribute *class_attr, diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h index 68c2aea897f5..fe2622a00151 100644 --- a/include/linux/netfilter/ipset/ip_set_list.h +++ b/include/linux/netfilter/ipset/ip_set_list.h @@ -6,5 +6,6 @@ #define IP_SET_LIST_DEFAULT_SIZE 8 #define IP_SET_LIST_MIN_SIZE 4 +#define IP_SET_LIST_MAX_SIZE 65536 #endif /* __IP_SET_LIST_H */ diff --git a/include/linux/phonedev.h b/include/linux/phonedev.h deleted file mode 100644 index 4269de99e320..000000000000 --- a/include/linux/phonedev.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef __LINUX_PHONEDEV_H -#define __LINUX_PHONEDEV_H - -#include <linux/types.h> - -#ifdef __KERNEL__ - -#include <linux/poll.h> - -struct phone_device { - struct phone_device *next; - const struct file_operations *f_op; - int (*open) (struct phone_device *, struct file *); - int board; /* Device private index */ - int minor; -}; - -extern int phonedev_init(void); -#define PHONE_MAJOR 100 -extern int phone_register_device(struct phone_device *, int unit); -#define PHONE_UNIT_ANY -1 -extern void phone_unregister_device(struct phone_device *); - -#endif -#endif diff --git a/include/linux/phy.h b/include/linux/phy.h index ed39956b5613..d090cfcaa167 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -598,6 +598,19 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) } /** + * phy_read_mmd_indirect - reads data from the MMD registers + * @phydev: The PHY device bus + * @prtad: MMD Address + * @devad: MMD DEVAD + * @addr: PHY address on the MII bus + * + * Description: it reads data from 
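netdev_start_xmit() becomes the single choke point for handing an skb to a driver, and the 'more' argument lands in skb->xmit_more so drivers can batch doorbell writes. A sketch of a flush loop built on it (the queueing helpers are hypothetical, locking elided):

struct sk_buff *skb;

while ((skb = example_dequeue(q)) != NULL) {
	bool more = !example_queue_empty(q);

	if (netdev_start_xmit(skb, dev, txq, more) != NETDEV_TX_OK) {
		/* driver busy: requeue and stop draining */
		example_requeue(q, skb);
		break;
	}
}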
the MMD registers (clause 22 to access to + * clause 45) of the specified phy address. + */ +int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, + int devad, int addr); + +/** * phy_read - Convenience function for reading a given PHY register * @phydev: the phy_device struct * @regnum: register number to read @@ -668,6 +681,20 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad, return mdiobus_write(phydev->bus, phydev->addr, regnum, val); } +/** + * phy_write_mmd_indirect - writes data to the MMD registers + * @phydev: The PHY device + * @prtad: MMD Address + * @devad: MMD DEVAD + * @addr: PHY address on the MII bus + * @data: data to write in the MMD register + * + * Description: Write data from the MMD registers of the specified + * phy address. + */ +void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, + int devad, int addr, u32 data); + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index ae612acebb53..941138664c1d 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -18,6 +18,9 @@ extern int fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, struct device_node *np); extern void fixed_phy_del(int phy_addr); +extern int fixed_phy_set_link_update(struct phy_device *phydev, + int (*link_update)(struct net_device *, + struct fixed_phy_status *)); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status) @@ -34,14 +37,12 @@ static inline int fixed_phy_del(int phy_addr) { return -ENODEV; } -#endif /* CONFIG_FIXED_PHY */ - -/* - * This function issued only by fixed_phy-aware drivers, no need - * protect it with #ifdef - */ -extern int fixed_phy_set_link_update(struct phy_device *phydev, +static inline int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, - struct fixed_phy_status *)); + struct fixed_phy_status *)) +{ + return -ENODEV; +} +#endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ diff --git a/include/linux/random.h b/include/linux/random.h index 57fbbffd77a0..b05856e16b75 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -26,7 +26,7 @@ unsigned int get_random_int(void); unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); u32 prandom_u32(void); -void prandom_bytes(void *buf, int nbytes); +void prandom_bytes(void *buf, size_t nbytes); void prandom_seed(u32 seed); void prandom_reseed_late(void); @@ -35,7 +35,7 @@ struct rnd_state { }; u32 prandom_u32_state(struct rnd_state *state); -void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); /** * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 36826c0166c5..fb298e9d6d3a 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -44,6 +44,7 @@ struct rhashtable; * @head_offset: Offset of rhash_head in struct to be hashed * @hash_rnd: Seed to use while hashing * @max_shift: Maximum number of shifts while expanding + * @min_shift: Minimum number of shifts while shrinking * @hashfn: Function to hash key * @obj_hashfn: Function to hash object * @grow_decision: If defined, may return true if table should expand @@ -57,6 +58,7 @@ struct rhashtable_params { 
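The indirect MMD helpers let clause-22-only MDIO buses reach clause-45 registers through registers 13/14. A hypothetical read-modify-write of register 0x14 in MMD device 3, assuming the usual convention that a negative return from the read indicates an error:

int val;

val = phy_read_mmd_indirect(phydev, 0x14, 3, phydev->addr);
if (val >= 0)
	phy_write_mmd_indirect(phydev, 0x14, 3, phydev->addr,
			       val | BIT(0));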
size_t head_offset; u32 hash_rnd; size_t max_shift; + size_t min_shift; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; bool (*grow_decision)(const struct rhashtable *ht, diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 167bae7bdfa4..6cacbce1a06c 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -47,6 +47,16 @@ static inline int lockdep_rtnl_is_held(void) rcu_dereference_check(p, lockdep_rtnl_is_held()) /** + * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking + * @p: The pointer to read, prior to dereference + * + * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh() + * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference_bh() + */ +#define rcu_dereference_bh_rtnl(p) \ + rcu_dereference_bh_check(p, lockdep_rtnl_is_held()) + +/** * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL * @p: The pointer to read, prior to dereferencing * diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index abde271c18ae..f1bfa3781c75 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -47,11 +47,29 @@ * * The hardware you're dealing with doesn't calculate the full checksum * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums - * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will - * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still - * undefined in this case though. It is a bad option, but, unfortunately, - * nowadays most vendors do this. Apparently with the secret goal to sell - * you new devices, when you will add new protocol to your host, f.e. IPv6 8) + * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY + * if their checksums are okay. skb->csum is still undefined in this case + * though. It is a bad option, but, unfortunately, nowadays most vendors do + * this. Apparently with the secret goal to sell you new devices, when you + * will add new protocol to your host, f.e. IPv6 8) + * + * CHECKSUM_UNNECESSARY is applicable to following protocols: + * TCP: IPv6 and IPv4. + * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a + * zero UDP checksum for either IPv4 or IPv6, the networking stack + * may perform further validation in this case. + * GRE: only if the checksum is present in the header. + * SCTP: indicates the CRC in SCTP header has been validated. + * + * skb->csum_level indicates the number of consecutive checksums found in + * the packet minus one that have been verified as CHECKSUM_UNNECESSARY. + * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet + * and a device is able to verify the checksums for UDP (possibly zero), + * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to + * two. If the device were only able to verify the UDP checksum and not + * GRE, either because it doesn't support GRE checksum of because GRE + * checksum is bad, skb->csum_level would be set to zero (TCP checksum is + * not considered in this case). 
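The csum_level rule is "number of verified checksums minus one". For the IPv6->UDP->GRE->IPv4->TCP example above, a driver that verified all three checksums would report:

skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = 2;	/* UDP, GRE and TCP verified: 3 - 1 = 2 */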
* * CHECKSUM_COMPLETE: * @@ -112,6 +130,9 @@ #define CHECKSUM_COMPLETE 2 #define CHECKSUM_PARTIAL 3 +/* Maximum value in skb->csum_level */ +#define SKB_MAX_CSUM_LEVEL 3 + #define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) #define SKB_WITH_OVERHEAD(X) \ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) @@ -452,6 +473,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @tc_verd: traffic control verdict * @hash: the packet hash * @queue_mapping: Queue mapping for multiqueue devices + * @xmit_more: More SKBs are pending for this queue * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -526,6 +548,16 @@ struct sk_buff { ip_summed:2, nohdr:1, nfctinfo:3; + +/* if you move pkt_type around you also must adapt those constants */ +#ifdef __BIG_ENDIAN_BITFIELD +#define PKT_TYPE_MAX (7 << 5) +#else +#define PKT_TYPE_MAX 7 +#endif +#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset) + + __u8 __pkt_type_offset[0]; __u8 pkt_type:3, fclone:2, ipvs_property:1, @@ -558,6 +590,7 @@ struct sk_buff { __u16 queue_mapping; kmemcheck_bitfield_begin(flags2); + __u8 xmit_more:1; #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif @@ -569,16 +602,12 @@ struct sk_buff { __u8 wifi_acked:1; __u8 no_fcs:1; __u8 head_frag:1; - /* Encapsulation protocol and NIC drivers should use - * this flag to indicate to each other if the skb contains - * encapsulated packet or not and maybe use the inner packet - * headers if needed - */ + /* Indicates the inner headers are valid in the skbuff. */ __u8 encapsulation:1; __u8 encap_hdr_csum:1; __u8 csum_valid:1; __u8 csum_complete_sw:1; - /* 2/4 bit hole (depending on ndisc_nodetype presence) */ + /* 1/3 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL @@ -596,6 +625,12 @@ struct sk_buff { __u32 reserved_tailroom; }; + kmemcheck_bitfield_begin(flags3); + __u8 csum_level:2; + __u8 csum_bad:1; + /* 13 bit hole */ + kmemcheck_bitfield_end(flags3); + __be16 inner_protocol; __u16 inner_transport_header; __u16 inner_network_header; @@ -734,6 +769,12 @@ static inline struct sk_buff *alloc_skb(unsigned int size, return __alloc_skb(size, priority, 0, NUMA_NO_NODE); } +struct sk_buff *alloc_skb_with_frags(unsigned long header_len, + unsigned long data_len, + int max_page_order, + int *errcode, + gfp_t gfp_mask); + static inline struct sk_buff *alloc_skb_fclone(unsigned int size, gfp_t priority) { @@ -1860,18 +1901,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) return pskb_may_pull(skb, skb_network_offset(skb) + len); } -static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb) -{ - /* Only continue with checksum unnecessary if device indicated - * it is valid across encapsulation (skb->encapsulation was set). - */ - if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation) - skb->ip_summed = CHECKSUM_NONE; - - skb->encapsulation = 0; - skb->csum_valid = 0; -} - /* * CPUs often take a performance hit when accessing unaligned memory * locations. 
The actual performance hit varies, it can be small if the @@ -2567,20 +2596,26 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum); -static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, - int len, void *buffer) +static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *data, int hlen, void *buffer) { - int hlen = skb_headlen(skb); - if (hlen - offset >= len) - return skb->data + offset; + return data + offset; - if (skb_copy_bits(skb, offset, buffer, len) < 0) + if (!skb || + skb_copy_bits(skb, offset, buffer, len) < 0) return NULL; return buffer; } +static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, + int len, void *buffer) +{ + return __skb_header_pointer(skb, offset, len, skb->data, + skb_headlen(skb), buffer); +} + /** * skb_needs_linearize - check if we need to linearize a given skb * depending on the given device features. @@ -2671,6 +2706,8 @@ static inline ktime_t net_invalid_timestamp(void) return ktime_set(0, 0); } +struct sk_buff *skb_clone_sk(struct sk_buff *skb); + #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING void skb_clone_tx_timestamp(struct sk_buff *skb); @@ -2786,6 +2823,42 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb) 0 : __skb_checksum_complete(skb); } +static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (skb->csum_level == 0) + skb->ip_summed = CHECKSUM_NONE; + else + skb->csum_level--; + } +} + +static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + if (skb->csum_level < SKB_MAX_CSUM_LEVEL) + skb->csum_level++; + } else if (skb->ip_summed == CHECKSUM_NONE) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 0; + } +} + +static inline void __skb_mark_checksum_bad(struct sk_buff *skb) +{ + /* Mark current checksum as bad (typically called from GRO + * path). In the case that ip_summed is CHECKSUM_NONE + * this must be the first checksum encountered in the packet. + * When ip_summed is CHECKSUM_UNNECESSARY, this is the first + * checksum after the last one validated. For UDP, a zero + * checksum can not be marked as bad. + */ + + if (skb->ip_summed == CHECKSUM_NONE || + skb->ip_summed == CHECKSUM_UNNECESSARY) + skb->csum_bad = 1; +} + /* Check if we need to perform checksum complete validation. 
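__skb_header_pointer() is the core of skb_header_pointer() split out so callers can aim it at a raw buffer: when the requested bytes sit inside the linear 'data' region it returns a direct pointer, and a NULL skb simply makes the copy fallback fail. A sketch in the style of the reworked flow dissector, with nhoff/data/hlen assumed from context:

const struct iphdr *iph;
struct iphdr _iph;

iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph)
	return false;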
* * Returns true if checksum complete is needed, false otherwise @@ -2797,6 +2870,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb, { if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { skb->csum_valid = 1; + __skb_decr_checksum_unnecessary(skb); return false; } @@ -2826,6 +2900,9 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb, skb->csum_valid = 1; return 0; } + } else if (skb->csum_bad) { + /* ip_summed == CHECKSUM_NONE in this case */ + return 1; } skb->csum = psum; @@ -2883,6 +2960,26 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto) #define skb_checksum_simple_validate(skb) \ __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) +static inline bool __skb_checksum_convert_check(struct sk_buff *skb) +{ + return (skb->ip_summed == CHECKSUM_NONE && + skb->csum_valid && !skb->csum_bad); +} + +static inline void __skb_checksum_convert(struct sk_buff *skb, + __sum16 check, __wsum pseudo) +{ + skb->csum = ~pseudo; + skb->ip_summed = CHECKSUM_COMPLETE; +} + +#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \ +do { \ + if (__skb_checksum_convert_check(skb)) \ + __skb_checksum_convert(skb, check, \ + compute_pseudo(skb, proto)); \ +} while (0) + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) void nf_conntrack_destroy(struct nf_conntrack *nfct); static inline void nf_conntrack_put(struct nf_conntrack *nfct) @@ -3137,7 +3234,9 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); int skb_checksum_setup(struct sk_buff *skb, bool recalculate); -u32 __skb_get_poff(const struct sk_buff *skb); +u32 skb_get_poff(const struct sk_buff *skb); +u32 __skb_get_poff(const struct sk_buff *skb, void *data, + const struct flow_keys *keys, int hlen); /** * skb_head_is_locked - Determine if the skb->head is locked down diff --git a/include/linux/tcp.h b/include/linux/tcp.h index fa5258f322e7..e567f0dbf282 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -276,7 +276,7 @@ struct tcp_sock { u32 retrans_stamp; /* Timestamp of the last retransmit, * also used in SYN-SENT to remember stamp of * the first SYN. */ - u32 undo_marker; /* tracking retrans started here. */ + u32 undo_marker; /* snd_una upon a new recovery episode. */ int undo_retrans; /* number of undoable retransmissions. */ u32 total_retrans; /* Total retransmits for entire connection */ diff --git a/include/linux/udp.h b/include/linux/udp.h index 247cfdcc4b08..ee3277593222 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -49,7 +49,11 @@ struct udp_sock { unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ + no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ + convert_csum:1;/* On receive, convert checksum + * unnecessary to checksum complete + * if possible. + */ /* * Following member retains the information to create a UDP header * when the socket is uncorked. 
@@ -98,6 +102,16 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } +static inline void udp_set_convert_csum(struct sock *sk, bool val) +{ + udp_sk(sk)->convert_csum = val; +} + +static inline bool udp_get_convert_csum(struct sock *sk) +{ + return udp_sk(sk)->convert_csum; +} + #define udp_portaddr_for_each_entry(__sk, node, list) \ hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) diff --git a/include/net/addrconf.h b/include/net/addrconf.h index ec51e673b4b6..d13573bb879e 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -202,7 +202,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); void ipv6_sock_ac_close(struct sock *sk); -int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); +int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr); int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); void ipv6_ac_destroy_dev(struct inet6_dev *idev); bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 6f884e6c731e..b0ded1333865 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -302,7 +302,7 @@ struct hci_dev { __u32 req_status; __u32 req_result; - struct crypto_blkcipher *tfm_aes; + void *smp_data; struct discovery_state discovery; struct hci_conn_hash conn_hash; @@ -970,6 +970,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) +#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ + !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) + /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 @@ -1258,6 +1261,8 @@ bool hci_req_pending(struct hci_dev *hdev); void hci_req_add_le_scan_disable(struct hci_request *req); void hci_req_add_le_passive_scan(struct hci_request *req); +void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req); + struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u32 timeout); struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, @@ -1353,6 +1358,7 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len); void mgmt_discovering(struct hci_dev *hdev, u8 discovering); +bool mgmt_powering_down(struct hci_dev *hdev); void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk); void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 8df15ad0d43f..cedda399f9c0 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -625,6 +625,9 @@ struct l2cap_conn { struct delayed_work info_timer; + int disconn_err; + struct work_struct disconn_work; + struct sk_buff *rx_skb; __u32 rx_len; __u8 tx_ident; @@ -635,8 +638,7 @@ struct l2cap_conn { __u8 disc_reason; - struct delayed_work security_timer; - struct smp_chan *smp_chan; + struct l2cap_chan *smp; struct list_head chan_l; struct mutex chan_lock; @@ -708,6 +710,7 @@ enum { FLAG_EFS_ENABLE, FLAG_DEFER_SETUP, FLAG_LE_CONN_REQ_SENT, + 
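udp_set_convert_csum() is the opt-in knob for the checksum-convert helper added to skbuff.h above: an encapsulation socket sets it once, and the receive path may then upgrade a validated pseudo-header checksum to CHECKSUM_COMPLETE for the inner protocol. A sketch, where inet_compute_pseudo is assumed to be the existing IPv4 pseudo-header helper:

/* at socket setup time, e.g. for a foo-over-udp socket */
udp_set_convert_csum(sk, true);

/* in the receive path, with 'uh' pointing at the UDP header */
if (udp_get_convert_csum(sk) && uh->check)
	skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
				 inet_compute_pseudo);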
FLAG_PENDING_SECURITY, }; enum { @@ -837,18 +840,43 @@ static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan return NULL; } +static inline int l2cap_chan_no_recv(struct l2cap_chan *chan, struct sk_buff *skb) +{ + return -ENOSYS; +} + +static inline struct sk_buff *l2cap_chan_no_alloc_skb(struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb) +{ + return ERR_PTR(-ENOSYS); +} + static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err) { } +static inline void l2cap_chan_no_close(struct l2cap_chan *chan) +{ +} + static inline void l2cap_chan_no_ready(struct l2cap_chan *chan) { } +static inline void l2cap_chan_no_state_change(struct l2cap_chan *chan, + int state, int err) +{ +} + static inline void l2cap_chan_no_defer(struct l2cap_chan *chan) { } +static inline void l2cap_chan_no_suspend(struct l2cap_chan *chan) +{ +} + static inline void l2cap_chan_no_resume(struct l2cap_chan *chan) { } @@ -918,6 +946,7 @@ void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan, u8 status); void __l2cap_physical_cfm(struct l2cap_chan *chan, int result); +void l2cap_conn_shutdown(struct l2cap_conn *conn, int err); void l2cap_conn_get(struct l2cap_conn *conn); void l2cap_conn_put(struct l2cap_conn *conn); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 0a080c4de275..ab21299c8f4d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1503,12 +1503,14 @@ enum cfg80211_signal_type { * @tsf: TSF contained in the frame that carried these IEs * @rcu_head: internal use, for freeing * @len: length of the IEs + * @from_beacon: these IEs are known to come from a beacon * @data: IE data */ struct cfg80211_bss_ies { u64 tsf; struct rcu_head rcu_head; int len; + bool from_beacon; u8 data[]; }; @@ -3765,11 +3767,25 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy, } /** - * cfg80211_inform_bss - inform cfg80211 of a new BSS + * enum cfg80211_bss_frame_type - frame type that the BSS data came from + * @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is + * from a beacon or probe response + * @CFG80211_BSS_FTYPE_BEACON: data comes from a beacon + * @CFG80211_BSS_FTYPE_PRESP: data comes from a probe response + */ +enum cfg80211_bss_frame_type { + CFG80211_BSS_FTYPE_UNKNOWN, + CFG80211_BSS_FTYPE_BEACON, + CFG80211_BSS_FTYPE_PRESP, +}; + +/** + * cfg80211_inform_bss_width - inform cfg80211 of a new BSS * * @wiphy: the wiphy reporting the BSS * @rx_channel: The channel the frame was received on * @scan_width: width of the control channel + * @ftype: frame type (if known) * @bssid: the BSSID of the BSS * @tsf: the TSF sent by the peer in the beacon/probe response (or 0) * @capability: the capability field sent by the peer @@ -3789,6 +3805,7 @@ struct cfg80211_bss * __must_check cfg80211_inform_bss_width(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, + enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp); @@ -3796,12 +3813,13 @@ cfg80211_inform_bss_width(struct wiphy *wiphy, static inline struct cfg80211_bss * __must_check cfg80211_inform_bss(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, + enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp) { return cfg80211_inform_bss_width(wiphy, rx_channel, - NL80211_BSS_CHAN_WIDTH_20, + 
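The l2cap_chan_no_*() stubs let a fixed-channel user such as the reworked SMP code fill every mandatory l2cap_ops hook without boilerplate. A hypothetical ops table, assuming the l2cap_ops layout of this series:

static const struct l2cap_ops example_fixed_chan_ops = {
	.name		= "example-fixed-chan",
	.recv		= l2cap_chan_no_recv,
	.alloc_skb	= l2cap_chan_no_alloc_skb,
	.new_connection	= l2cap_chan_no_new_connection,
	.teardown	= l2cap_chan_no_teardown,
	.close		= l2cap_chan_no_close,
	.state_change	= l2cap_chan_no_state_change,
	.ready		= l2cap_chan_no_ready,
	.defer		= l2cap_chan_no_defer,
	.suspend	= l2cap_chan_no_suspend,
	.resume		= l2cap_chan_no_resume,
};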
NL80211_BSS_CHAN_WIDTH_20, ftype, bssid, tsf, capability, beacon_interval, ie, ielen, signal, gfp); @@ -4412,7 +4430,6 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * @buf: Management frame (header + body) * @len: length of the frame data * @flags: flags, as defined in enum nl80211_rxmgmt_flags - * @gfp: context flags * * This function is called whenever an Action frame is received for a station * mode interface, but is not processed in kernel. @@ -4423,7 +4440,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * driver is responsible for rejecting the frame. */ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, - const u8 *buf, size_t len, u32 flags, gfp_t gfp); + const u8 *buf, size_t len, u32 flags); /** * cfg80211_mgmt_tx_status - notification of TX status for management frame diff --git a/include/net/codel.h b/include/net/codel.h index fe0eab32ce76..aeee28081245 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -66,7 +66,7 @@ typedef s32 codel_tdiff_t; static inline codel_time_t codel_get_time(void) { - u64 ns = ktime_to_ns(ktime_get()); + u64 ns = ktime_get_ns(); return ns >> CODEL_SHIFT; } diff --git a/include/net/dsa.h b/include/net/dsa.h index 6efce384451e..d8054fb4a4df 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -15,6 +15,17 @@ #include <linux/list.h> #include <linux/timer.h> #include <linux/workqueue.h> +#include <linux/of.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> + +enum dsa_tag_protocol { + DSA_TAG_PROTO_NONE = 0, + DSA_TAG_PROTO_DSA, + DSA_TAG_PROTO_TRAILER, + DSA_TAG_PROTO_EDSA, + DSA_TAG_PROTO_BRCM, +}; #define DSA_MAX_SWITCHES 4 #define DSA_MAX_PORTS 12 @@ -23,9 +34,15 @@ struct dsa_chip_data { /* * How to access the switch configuration registers. */ - struct device *mii_bus; + struct device *host_dev; int sw_addr; + /* Device tree node pointer for this specific switch chip + * used during switch setup in case additional properties + * and resources needs to be used + */ + struct device_node *of_node; + /* * The names of the switch's ports. Use "cpu" to * designate the switch port that the cpu is connected to, @@ -34,6 +51,7 @@ struct dsa_chip_data { * or any other string to indicate this is a physical port. */ char *port_names[DSA_MAX_PORTS]; + struct device_node *port_dn[DSA_MAX_PORTS]; /* * An array (with nr_chips elements) of which element [a] @@ -59,6 +77,8 @@ struct dsa_platform_data { struct dsa_chip_data *chip; }; +struct packet_type; + struct dsa_switch_tree { /* * Configuration data for the platform device that owns @@ -71,7 +91,11 @@ struct dsa_switch_tree { * protocol to use. */ struct net_device *master_netdev; - __be16 tag_protocol; + int (*rcv)(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev); + enum dsa_tag_protocol tag_protocol; /* * The switch and port to which the CPU is attached. @@ -110,15 +134,16 @@ struct dsa_switch { struct dsa_switch_driver *drv; /* - * Reference to mii bus to use. + * Reference to host device to use. */ - struct mii_bus *master_mii_bus; + struct device *master_dev; /* * Slave mii_bus and devices for the individual ports. 
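Callers of cfg80211_inform_bss() now pass a frame type so cfg80211 can set the new ies->from_beacon flag; drivers that cannot tell pass CFG80211_BSS_FTYPE_UNKNOWN. A sketch of a fullmac driver reporting a scan hit, with all values assumed from driver context:

struct cfg80211_bss *bss;

bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_BEACON,
			  bssid, tsf, capability, beacon_interval,
			  ie, ielen, signal_mbm, GFP_KERNEL);
if (bss)
	cfg80211_put_bss(wiphy, bss);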
*/ u32 dsa_port_mask; u32 phys_port_mask; + u32 phys_mii_mask; struct mii_bus *slave_mii_bus; struct net_device *ports[DSA_MAX_PORTS]; }; @@ -147,15 +172,16 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) struct dsa_switch_driver { struct list_head list; - __be16 tag_protocol; + enum dsa_tag_protocol tag_protocol; int priv_size; /* * Probing and setup. */ - char *(*probe)(struct mii_bus *bus, int sw_addr); + char *(*probe)(struct device *host_dev, int sw_addr); int (*setup)(struct dsa_switch *ds); int (*set_addr)(struct dsa_switch *ds, u8 *addr); + u32 (*get_phy_flags)(struct dsa_switch *ds, int port); /* * Access to the switch's PHY registers. @@ -170,37 +196,47 @@ struct dsa_switch_driver { void (*poll_link)(struct dsa_switch *ds); /* + * Link state adjustment (called from libphy) + */ + void (*adjust_link)(struct dsa_switch *ds, int port, + struct phy_device *phydev); + void (*fixed_link_update)(struct dsa_switch *ds, int port, + struct fixed_phy_status *st); + + /* * ethtool hardware statistics. */ void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); void (*get_ethtool_stats)(struct dsa_switch *ds, int port, uint64_t *data); int (*get_sset_count)(struct dsa_switch *ds); + + /* + * ethtool Wake-on-LAN + */ + void (*get_wol)(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *w); + int (*set_wol)(struct dsa_switch *ds, int port, + struct ethtool_wolinfo *w); + + /* + * Suspend and resume + */ + int (*suspend)(struct dsa_switch *ds); + int (*resume)(struct dsa_switch *ds); }; void register_switch_driver(struct dsa_switch_driver *type); void unregister_switch_driver(struct dsa_switch_driver *type); +struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev); static inline void *ds_to_priv(struct dsa_switch *ds) { return (void *)(ds + 1); } -/* - * The original DSA tag format and some other tag formats have no - * ethertype, which means that we need to add a little hack to the - * networking receive path to make sure that received frames get - * the right ->protocol assigned to them when one of those tag - * formats is in use. 
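A DSA switch driver now advertises its tagging format via the enum instead of an ethertype, probes against a generic struct device, and can hook link adjustment straight from libphy. A hypothetical skeleton:

static char *example_probe(struct device *host_dev, int sw_addr)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);

	return bus ? "example-switch" : NULL;	/* name on success */
}

static int example_setup(struct dsa_switch *ds)
{
	return 0;
}

static struct dsa_switch_driver example_switch_driver = {
	.tag_protocol	= DSA_TAG_PROTO_BRCM,
	.probe		= example_probe,
	.setup		= example_setup,
	/* .adjust_link, .get_wol, .suspend, .resume, ... as needed */
};

The driver is then registered with register_switch_driver() from its module init path.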
- */ -static inline bool dsa_uses_dsa_tags(struct dsa_switch_tree *dst) +static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) { - return !!(dst->tag_protocol == htons(ETH_P_DSA)); + return dst->rcv != NULL; } - -static inline bool dsa_uses_trailer_tags(struct dsa_switch_tree *dst) -{ - return !!(dst->tag_protocol == htons(ETH_P_TRAILER)); -} - #endif diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h index 6667a054763a..7ee2df083542 100644 --- a/include/net/flow_keys.h +++ b/include/net/flow_keys.h @@ -27,7 +27,19 @@ struct flow_keys { u8 ip_proto; }; -bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow); -__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto); +bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, + void *data, __be16 proto, int nhoff, int hlen); +static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) +{ + return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0); +} +__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + void *data, int hlen_proto); +static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto) +{ + return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); +} u32 flow_hash_from_keys(struct flow_keys *keys); +unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len, + __be16 protocol); #endif diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 5fbe6568c3cf..848e85cb5c61 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -242,6 +242,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, #endif } +static inline unsigned long +inet_csk_rto_backoff(const struct inet_connection_sock *icsk, + unsigned long max_when) +{ + u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff; + + return (unsigned long)min_t(u64, when, max_when); +} + struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); struct request_sock *inet_csk_search_req(const struct sock *sk, diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 01d590ee5e7e..80479abddf73 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -61,7 +61,6 @@ struct inet_peer { struct inet_peer_base { struct inet_peer __rcu *root; seqlock_t lock; - u32 flush_seq; int total; }; diff --git a/include/net/ip.h b/include/net/ip.h index db4a771b9ef3..14bfc8e1bcf9 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -229,8 +229,6 @@ static inline int inet_is_local_reserved_port(struct net *net, int port) } #endif -extern int sysctl_ip_nonlocal_bind; - /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; @@ -364,6 +362,14 @@ static inline void inet_set_txhash(struct sock *sk) sk->sk_txhash = flow_hash_from_keys(&keys); } +static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto) +{ + const struct iphdr *iph = skb_gro_network_header(skb); + + return csum_tcpudp_nofold(iph->saddr, iph->daddr, + skb_gro_len(skb), proto, 0); +} + /* * Map a multicast IP onto multicast MAC for type ethernet. 
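__skb_flow_dissect() decouples dissection from the skb: given data/hlen it can hash headers that exist only in a raw buffer, while skb_flow_dissect() stays a thin wrapper. A sketch, with data/nhoff/hlen assumed from context:

struct flow_keys keys;
u32 hash = 0;

/* classic skb-based dissection, unchanged for existing callers */
if (skb_flow_dissect(skb, &keys))
	hash = flow_hash_from_keys(&keys);

/* new: dissect a raw buffer with no skb at all */
if (__skb_flow_dissect(NULL, &keys, data, htons(ETH_P_IP), nhoff, hlen))
	hash = flow_hash_from_keys(&keys);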
*/ diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 55236cb71174..1a49b73f7f6e 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -48,6 +48,14 @@ static inline __wsum ip6_compute_pseudo(struct sk_buff *skb, int proto) skb->len, proto, 0)); } +static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto) +{ + const struct ipv6hdr *iph = skb_gro_network_header(skb); + + return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr, + skb_gro_len(skb), proto, 0)); +} + static __inline__ __sum16 tcp_v6_check(int len, const struct in6_addr *saddr, const struct in6_addr *daddr, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9922093f575e..dc9d2a27c315 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -65,7 +65,8 @@ struct fnhe_hash_bucket { struct fib_nh_exception __rcu *chain; }; -#define FNHE_HASH_SIZE 2048 +#define FNHE_HASH_SHIFT 11 +#define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT) #define FNHE_RECLAIM_DEPTH 5 struct fib_nh { @@ -87,7 +88,7 @@ struct fib_nh { int nh_saddr_genid; struct rtable __rcu * __percpu *nh_pcpu_rth_output; struct rtable __rcu *nh_rth_input; - struct fnhe_hash_bucket *nh_exceptions; + struct fnhe_hash_bucket __rcu *nh_exceptions; }; /* diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 8dd8cab88b87..7f538ba6e267 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -10,6 +10,7 @@ #include <net/gro_cells.h> #include <net/inet_ecn.h> #include <net/ip.h> +#include <net/netns/generic.h> #include <net/rtnetlink.h> #if IS_ENABLED(CONFIG_IPV6) @@ -31,6 +32,13 @@ struct ip_tunnel_6rd_parm { }; #endif +struct ip_tunnel_encap { + __u16 type; + __u16 flags; + __be16 sport; + __be16 dport; +}; + struct ip_tunnel_prl_entry { struct ip_tunnel_prl_entry __rcu *next; __be32 addr; @@ -56,13 +64,18 @@ struct ip_tunnel { /* These four fields used only by GRE */ __u32 i_seqno; /* The last seen seqno */ __u32 o_seqno; /* The last output seqno */ - int hlen; /* Precalculated header length */ + int tun_hlen; /* Precalculated header length */ int mlink; struct ip_tunnel_dst __percpu *dst_cache; struct ip_tunnel_parm parms; + int encap_hlen; /* Encap header length (FOU,GUE) */ + struct ip_tunnel_encap encap; + + int hlen; /* tun_hlen + encap_hlen */ + /* for SIT */ #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd_parm ip6rd; @@ -114,6 +127,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops); void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, const u8 protocol); int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); +int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, + u8 *protocol, struct flowi4 *fl4); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, @@ -131,6 +146,8 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p); void ip_tunnel_setup(struct net_device *dev, int net_id); void ip_tunnel_dst_reset_all(struct ip_tunnel *t); +int ip_tunnel_encap_setup(struct ip_tunnel *t, + struct ip_tunnel_encap *ipencap); /* Extract dsfield from inner protocol */ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index a2db816e8461..7e247e9b8765 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -121,6 +121,7 @@ struct frag_hdr { /* sysctls */ extern int 
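ip_tunnel_encap_setup() stores the outer-UDP (FOU/GUE) parameters and recomputes hlen as tun_hlen + encap_hlen; ip_tunnel_encap() then pushes the extra header at transmit time. A sketch of enabling FOU-style encapsulation on a tunnel, where the TUNNEL_ENCAP_* constant is assumed to come from the matching UAPI change:

struct ip_tunnel_encap ipencap = {
	.type	= TUNNEL_ENCAP_FOU,	/* assumed UAPI constant */
	.sport	= 0,			/* 0 = let the stack choose */
	.dport	= htons(5555),
};
int err;

err = ip_tunnel_encap_setup(tunnel, &ipencap);
if (err)
	return err;	/* on success tunnel->hlen includes encap_hlen */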
sysctl_mld_max_msf; +extern int sysctl_mld_qrv; #define _DEVINC(net, statname, modifier, idev, field) \ ({ \ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index dae2e24616e1..c9b2bec8db47 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1226,7 +1226,8 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the * driver to indicate that it requires IV generation for this - * particular key. + * particular key. Setting this flag does not necessarily mean that SKBs + * will have sufficient tailroom for ICV or MIC. * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by * the driver for a TKIP key if it requires Michael MIC * generation in software. @@ -1238,7 +1239,9 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver * if space should be prepared for the IV, but the IV * itself should not be generated. Do not set together with - * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does + * not necessarily mean that SKBs will have sufficient tailroom for ICV or + * MIC. * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received * management frames. The flag can help drivers that have a hardware * crypto implementation that doesn't deal with management frames @@ -1405,7 +1408,7 @@ struct ieee80211_sta_rates { * @supp_rates: Bitmap of supported rates (per band) * @ht_cap: HT capabilities of this STA; restricted to our own capabilities * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities - * @wme: indicates whether the STA supports WME. Only valid during AP-mode. + * @wme: indicates whether the STA supports QoS/WME. * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *), size is determined in hw information. * @uapsd_queues: bitmap of queues configured for uapsd. Only valid @@ -1606,6 +1609,9 @@ struct ieee80211_tx_control { * is not enabled the default action is to disconnect when getting the * CSA frame. * + * @IEEE80211_HW_SUPPORTS_CLONED_SKBS: The driver will never modify the payload + * or tailroom of TX skbs without copying them first. + * * @IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands * in one command, mac80211 doesn't have to run separate scans per band. */ @@ -1639,7 +1645,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27, IEEE80211_HW_CHANCTX_STA_CSA = 1<<28, - /* bit 29 unused */ + IEEE80211_HW_SUPPORTS_CLONED_SKBS = 1<<29, IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS = 1<<30, }; diff --git a/include/net/mld.h b/include/net/mld.h index faa1d161bf24..01d751303498 100644 --- a/include/net/mld.h +++ b/include/net/mld.h @@ -88,12 +88,15 @@ struct mld2_query { #define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07) #define MLDV2_QQIC_MAN(value) ((value) & 0x0f) +#define MLD_EXP_MIN_LIMIT 32768UL +#define MLDV1_MRD_MAX_COMPAT (MLD_EXP_MIN_LIMIT - 1) + static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2) { /* RFC3810, 5.1.3. 
Maximum Response Code */ unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc); - if (mc_mrc < 32768) { + if (mc_mrc < MLD_EXP_MIN_LIMIT) { ret = mc_mrc; } else { unsigned long mc_man, mc_exp; diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h new file mode 100644 index 000000000000..a9c001c646da --- /dev/null +++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h @@ -0,0 +1,14 @@ +#ifndef _NF_NAT_MASQUERADE_IPV4_H_ +#define _NF_NAT_MASQUERADE_IPV4_H_ + +#include <net/netfilter/nf_nat.h> + +unsigned int +nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, + const struct nf_nat_range *range, + const struct net_device *out); + +void nf_nat_masquerade_ipv4_register_notifier(void); +void nf_nat_masquerade_ipv4_unregister_notifier(void); + +#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */ diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h new file mode 100644 index 000000000000..0a13396cd390 --- /dev/null +++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h @@ -0,0 +1,10 @@ +#ifndef _NF_NAT_MASQUERADE_IPV6_H_ +#define _NF_NAT_MASQUERADE_IPV6_H_ + +unsigned int +nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, + const struct net_device *out); +void nf_nat_masquerade_ipv6_register_notifier(void); +void nf_nat_masquerade_ipv6_unregister_notifier(void); + +#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */ diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h index 5a2919b2e09a..340c013795a4 100644 --- a/include/net/netfilter/nf_nat_l3proto.h +++ b/include/net/netfilter/nf_nat_l3proto.h @@ -42,8 +42,83 @@ const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto); int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum); + +unsigned int nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum, unsigned int hdrlen); +unsigned int nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct 
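These two headers factor the masquerade engine out of the xtables target so other users can share it; the register/unregister calls install the address and device notifiers that clean up masqueraded mappings when an interface goes away. A hedged sketch of a consumer module (names illustrative, error handling elided):

static int __init masq_consumer_init(void)
{
	/* Install the shared notifiers once for this module;
	 * pairs with the unregister call on exit.
	 */
	nf_nat_masquerade_ipv4_register_notifier();
	return 0;
}

static void __exit masq_consumer_exit(void)
{
	nf_nat_masquerade_ipv4_unregister_notifier();
}

module_init(masq_consumer_init);
module_exit(masq_consumer_exit);
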
net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + +unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)); + #endif /* _NF_NAT_L3PROTO_H */ diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h new file mode 100644 index 000000000000..c72729f954f4 --- /dev/null +++ b/include/net/netfilter/nft_masq.h @@ -0,0 +1,16 @@ +#ifndef _NFT_MASQ_H_ +#define _NFT_MASQ_H_ + +struct nft_masq { + u32 flags; +}; + +extern const struct nla_policy nft_masq_policy[]; + +int nft_masq_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]); + +int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr); + +#endif /* _NFT_MASQ_H_ */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index aec5e12f9f19..24945cefc4fd 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -76,6 +76,7 @@ struct netns_ipv4 { int sysctl_tcp_ecn; int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; + int sysctl_ip_nonlocal_bind; int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index ec030cd76616..8bbe626e9ece 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -50,7 +50,7 @@ typedef long psched_tdiff_t; static inline psched_time_t psched_get_time(void) { - return PSCHED_NS2TICKS(ktime_to_ns(ktime_get())); + return PSCHED_NS2TICKS(ktime_get_ns()); } static inline psched_tdiff_t diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 620e086c0cbe..e65b8e0752af 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -143,7 +143,7 @@ struct Qdisc_class_ops { void (*walk)(struct Qdisc *, struct qdisc_walker * arg); /* Filter manipulation */ - struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long); + struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid); void (*unbind_tcf)(struct Qdisc *, unsigned long); @@ -212,8 +212,8 @@ struct tcf_proto_ops { struct tcf_proto { /* Fast access part */ - struct tcf_proto *next; - void *root; + struct tcf_proto __rcu *next; + void __rcu *root; int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *); @@ -225,6 +225,7 @@ struct tcf_proto { struct Qdisc *q; void *data; const struct tcf_proto_ops *ops; + struct rcu_head rcu; }; struct qdisc_skb_cb { @@ -260,7 +261,9 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc) static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc) { - return qdisc->dev_queue->qdisc; + struct Qdisc *q = 
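With the tcf_proto chain now __rcu-annotated, every dereference has to go through an RCU accessor: rtnl_dereference() on the control path (RTNL held, as in the qdisc_reset_all_tx_gt() hunk below) and rcu_dereference() on the datapath. A sketch of a control-path walk under that assumption (walk_chain is hypothetical):

static void walk_chain(struct tcf_proto __rcu **chain)
{
	struct tcf_proto *tp;

	/* RTNL is assumed held; rtnl_dereference() documents that
	 * and keeps sparse happy about the __rcu annotation.
	 */
	for (tp = rtnl_dereference(*chain); tp;
	     tp = rtnl_dereference(tp->next))
		pr_debug("classifier prio %u\n", tp->prio);
}
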
rcu_dereference_rtnl(qdisc->dev_queue->qdisc); + + return q; } static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) @@ -377,7 +380,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab); void tcf_destroy(struct tcf_proto *tp); -void tcf_destroy_chain(struct tcf_proto **fl); +void tcf_destroy_chain(struct tcf_proto __rcu **fl); /* Reset all TX qdiscs greater then index of a device. */ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) @@ -385,7 +388,7 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) struct Qdisc *qdisc; for (; i < dev->num_tx_queues; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc); if (qdisc) { spin_lock_bh(qdisc_lock(qdisc)); qdisc_reset(qdisc); @@ -403,13 +406,18 @@ static inline void qdisc_reset_all_tx(struct net_device *dev) static inline bool qdisc_all_tx_empty(const struct net_device *dev) { unsigned int i; + + rcu_read_lock(); for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - const struct Qdisc *q = txq->qdisc; + const struct Qdisc *q = rcu_dereference(txq->qdisc); - if (q->q.qlen) + if (q->q.qlen) { + rcu_read_unlock(); return false; + } } + rcu_read_unlock(); return true; } @@ -417,9 +425,10 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev) static inline bool qdisc_tx_changing(const struct net_device *dev) { unsigned int i; + for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - if (txq->qdisc != txq->qdisc_sleeping) + if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping) return true; } return false; @@ -429,9 +438,10 @@ static inline bool qdisc_tx_changing(const struct net_device *dev) static inline bool qdisc_tx_is_noop(const struct net_device *dev) { unsigned int i; + for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - if (txq->qdisc != &noop_qdisc) + if (rcu_access_pointer(txq->qdisc) != &noop_qdisc) return false; } return true; diff --git a/include/net/snmp.h b/include/net/snmp.h index f1f27fdbb0d5..8fd2f498782e 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -146,19 +146,15 @@ struct linux_xfrm_mib { #define SNMP_ADD_STATS(mib, field, addend) \ this_cpu_add(mib->mibs[field], addend) -/* - * Use "__typeof__(*mib) *ptr" instead of "__typeof__(mib) ptr" - * to make @ptr a non-percpu pointer. 
- */ #define SNMP_UPD_PO_STATS(mib, basefield, addend) \ do { \ - __typeof__(*mib->mibs) *ptr = mib->mibs; \ + __typeof__((mib->mibs) + 0) ptr = mib->mibs; \ this_cpu_inc(ptr[basefield##PKTS]); \ this_cpu_add(ptr[basefield##OCTETS], addend); \ } while (0) #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \ do { \ - __typeof__(*mib->mibs) *ptr = mib->mibs; \ + __typeof__((mib->mibs) + 0) ptr = mib->mibs; \ __this_cpu_inc(ptr[basefield##PKTS]); \ __this_cpu_add(ptr[basefield##OCTETS], addend); \ } while (0) diff --git a/include/net/sock.h b/include/net/sock.h index b9a5bd0ed9f3..515a4d01e932 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1574,7 +1574,12 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, void sock_wfree(struct sk_buff *skb); void skb_orphan_partial(struct sk_buff *skb); void sock_rfree(struct sk_buff *skb); +void sock_efree(struct sk_buff *skb); +#ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); +#else +#define sock_edemux(skb) sock_efree(skb) +#endif int sock_setsockopt(struct socket *sock, int level, int op, char __user *optval, unsigned int optlen); @@ -2041,6 +2046,7 @@ void sk_stop_timer(struct sock *sk, struct timer_list *timer); int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); +struct sk_buff *sock_dequeue_err_skb(struct sock *sk); /* * Recover an error report and clear atomically @@ -2193,6 +2199,8 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, sk->sk_stamp = skb->tstamp; } +void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); + /** * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @sk: socket sending this packet @@ -2200,7 +2208,13 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, * * Note : callers should take care of initial *tx_flags value (usually 0) */ -void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); +static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) +{ + if (unlikely(sk->sk_tsflags)) + __sock_tx_timestamp(sk, tx_flags); + if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) + *tx_flags |= SKBTX_WIFI_STATUS; +} /** * sk_eat_skb - Release a skb if it is no longer needed diff --git a/include/net/tcp.h b/include/net/tcp.h index 590e01a476ac..a4201ef216e8 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -672,6 +672,12 @@ void tcp_send_window_probe(struct sock *sk); */ #define tcp_time_stamp ((__u32)(jiffies)) +static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) +{ + return skb->skb_mstamp.stamp_jiffies; +} + + #define tcp_flag_byte(th) (((u_int8_t *)th)[13]) #define TCPHDR_FIN 0x01 @@ -698,7 +704,7 @@ struct tcp_skb_cb { } header; /* For incoming frames */ __u32 seq; /* Starting sequence number */ __u32 end_seq; /* SEQ + FIN + SYN + datalen */ - __u32 when; /* used to compute rtt's */ + __u32 tcp_tw_isn; /* isn chosen by tcp_timewait_state_process() */ __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ __u8 sacked; /* State flags for SACK/FACK. 
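The sock_tx_timestamp() conversion moves the common no-timestamping case into an inline flag test, so callers keep the same shape while the function call disappears from the fast path. A sketch of the usual call pattern (tx_example is hypothetical):

static void tx_example(struct sock *sk, struct sk_buff *skb)
{
	__u8 tx_flags = 0;

	/* Inlined fast path: both tests are "unlikely" branches when
	 * neither timestamping nor wifi-status reporting is enabled.
	 */
	sock_tx_timestamp(sk, &tx_flags);
	if (tx_flags)
		skb_shinfo(skb)->tx_flags |= tx_flags;
}
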
*/ diff --git a/include/net/udp.h b/include/net/udp.h index 70f941368ace..16f4e80f0519 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -158,6 +158,24 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr, void udp_set_csum(bool nocheck, struct sk_buff *skb, __be32 saddr, __be32 daddr, int len); +struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, + struct udphdr *uh); +int udp_gro_complete(struct sk_buff *skb, int nhoff); + +static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb) +{ + struct udphdr *uh; + unsigned int hlen, off; + + off = skb_gro_offset(skb); + hlen = off + sizeof(*uh); + uh = skb_gro_header_fast(skb, off); + if (skb_gro_header_hard(skb, hlen)) + uh = skb_gro_header_slow(skb, hlen, off); + + return uh; +} + /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */ static inline void udp_lib_hash(struct sock *sk) { diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index ffd69cbded35..a47790bcaa38 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -1,6 +1,14 @@ #ifndef __NET_UDP_TUNNEL_H #define __NET_UDP_TUNNEL_H +#include <net/ip_tunnels.h> +#include <net/udp.h> + +#if IS_ENABLED(CONFIG_IPV6) +#include <net/ipv6.h> +#include <net/addrconf.h> +#endif + struct udp_port_cfg { u8 family; @@ -26,7 +34,80 @@ struct udp_port_cfg { use_udp6_rx_checksums:1; }; -int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, - struct socket **sockp); +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp); +#else +static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + return 0; +} +#endif + +static inline int udp_sock_create(struct net *net, + struct udp_port_cfg *cfg, + struct socket **sockp) +{ + if (cfg->family == AF_INET) + return udp_sock_create4(net, cfg, sockp); + + if (cfg->family == AF_INET6) + return udp_sock_create6(net, cfg, sockp); + + return -EPFNOSUPPORT; +} + +typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); +typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); + +struct udp_tunnel_sock_cfg { + void *sk_user_data; /* user data used by encap_rcv call back */ + /* Used for setting up udp_sock fields, see udp.h for details */ + __u8 encap_type; + udp_tunnel_encap_rcv_t encap_rcv; + udp_tunnel_encap_destroy_t encap_destroy; +}; + +/* Setup the given (UDP) sock to receive UDP encapsulated packets */ +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *sock_cfg); + +/* Transmit the skb using UDP encapsulation. */ +int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, + __u8 tos, __u8 ttl, __be16 df, __be16 src_port, + __be16 dst_port, bool xnet); + +#if IS_ENABLED(CONFIG_IPV6) +int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, + struct sk_buff *skb, struct net_device *dev, + struct in6_addr *saddr, struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be16 src_port, + __be16 dst_port); +#endif + +void udp_tunnel_sock_release(struct socket *sock); + +static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, + bool udp_csum) +{ + int type = udp_csum ? 
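udp_sock_create() is now a thin family dispatcher over the per-family helpers, and setup_udp_tunnel_sock() centralizes the encap wiring that every UDP tunnel used to open-code. A hedged sketch of a driver bringing up its tunnel socket (all names and the port number are illustrative):

static int my_encap_rcv(struct sock *sk, struct sk_buff *skb);

static int my_tunnel_create(struct net *net, struct socket **sockp)
{
	struct udp_port_cfg port_cfg = {
		.family         = AF_INET,
		.local_udp_port = htons(4789),
	};
	struct udp_tunnel_sock_cfg tunnel_cfg = {
		.encap_type = 1,		/* matching UDP_ENCAP_* value */
		.encap_rcv  = my_encap_rcv,	/* returning 0 = skb consumed */
	};
	int err;

	err = udp_sock_create(net, &port_cfg, sockp);
	if (err < 0)
		return err;

	setup_udp_tunnel_sock(net, *sockp, &tunnel_cfg);
	return 0;
}
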
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + + return iptunnel_handle_offloads(skb, udp_csum, type); +} + +static inline void udp_tunnel_encap_enable(struct socket *sock) +{ +#if IS_ENABLED(CONFIG_IPV6) + if (sock->sk->sk_family == PF_INET6) + ipv6_stub->udpv6_encap_enable(); + else +#endif + udp_encap_enable(); +} #endif diff --git a/include/rxrpc/types.h b/include/rxrpc/types.h deleted file mode 100644 index 30d48f6da228..000000000000 --- a/include/rxrpc/types.h +++ /dev/null @@ -1,41 +0,0 @@ -/* types.h: Rx types - * - * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _LINUX_RXRPC_TYPES_H -#define _LINUX_RXRPC_TYPES_H - -#include <linux/types.h> -#include <linux/list.h> -#include <linux/socket.h> -#include <linux/in.h> -#include <linux/spinlock.h> -#include <linux/atomic.h> - -typedef uint32_t rxrpc_seq_t; /* Rx message sequence number */ -typedef uint32_t rxrpc_serial_t; /* Rx message serial number */ -typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */ -typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */ - -struct rxrpc_call; -struct rxrpc_connection; -struct rxrpc_header; -struct rxrpc_message; -struct rxrpc_operation; -struct rxrpc_peer; -struct rxrpc_service; -typedef struct rxrpc_timer rxrpc_timer_t; -struct rxrpc_transport; - -typedef void (*rxrpc_call_attn_func_t)(struct rxrpc_call *call); -typedef void (*rxrpc_call_error_func_t)(struct rxrpc_call *call); -typedef void (*rxrpc_call_aemap_func_t)(struct rxrpc_call *call); - -#endif /* _LINUX_RXRPC_TYPES_H */ diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index be88166349a1..70e150ebc6c9 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -67,6 +67,7 @@ header-y += bfs_fs.h header-y += binfmts.h header-y += blkpg.h header-y += blktrace_api.h +header-y += bpf.h header-y += bpqether.h header-y += bsg.h header-y += btrfs.h diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h new file mode 100644 index 000000000000..479ed0b6be16 --- /dev/null +++ b/include/uapi/linux/bpf.h @@ -0,0 +1,65 @@ +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#ifndef _UAPI__LINUX_BPF_H__ +#define _UAPI__LINUX_BPF_H__ + +#include <linux/types.h> + +/* Extended instruction set based on top of classic BPF */ + +/* instruction classes */ +#define BPF_ALU64 0x07 /* alu mode in double word width */ + +/* ld/ldx fields */ +#define BPF_DW 0x18 /* double word */ +#define BPF_XADD 0xc0 /* exclusive add */ + +/* alu/jmp fields */ +#define BPF_MOV 0xb0 /* mov reg to reg */ +#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ + +/* change endianness of a register */ +#define BPF_END 0xd0 /* flags for endianness conversion: */ +#define BPF_TO_LE 0x00 /* convert to little-endian */ +#define BPF_TO_BE 0x08 /* convert to big-endian */ +#define BPF_FROM_LE BPF_TO_LE +#define BPF_FROM_BE BPF_TO_BE + +#define BPF_JNE 0x50 /* jump != */ +#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ +#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ +#define BPF_CALL 0x80 /* function call */ +#define BPF_EXIT 0x90 /* function return */ + +/* Register numbers */ +enum { + BPF_REG_0 = 0, + BPF_REG_1, + BPF_REG_2, + BPF_REG_3, + BPF_REG_4, + BPF_REG_5, + BPF_REG_6, + BPF_REG_7, + BPF_REG_8, + BPF_REG_9, + BPF_REG_10, + __MAX_BPF_REG, +}; + +/* BPF has 10 general purpose 64-bit registers and stack frame. */ +#define MAX_BPF_REG __MAX_BPF_REG + +struct bpf_insn { + __u8 code; /* opcode */ + __u8 dst_reg:4; /* dest register */ + __u8 src_reg:4; /* source register */ + __s16 off; /* signed offset */ + __s32 imm; /* signed immediate constant */ +}; + +#endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index e3c7a719c76b..7a364f2f3d3f 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -209,6 +209,32 @@ struct ethtool_value { __u32 data; }; +enum tunable_id { + ETHTOOL_ID_UNSPEC, + ETHTOOL_RX_COPYBREAK, +}; + +enum tunable_type_id { + ETHTOOL_TUNABLE_UNSPEC, + ETHTOOL_TUNABLE_U8, + ETHTOOL_TUNABLE_U16, + ETHTOOL_TUNABLE_U32, + ETHTOOL_TUNABLE_U64, + ETHTOOL_TUNABLE_STRING, + ETHTOOL_TUNABLE_S8, + ETHTOOL_TUNABLE_S16, + ETHTOOL_TUNABLE_S32, + ETHTOOL_TUNABLE_S64, +}; + +struct ethtool_tunable { + __u32 cmd; + __u32 id; + __u32 type_id; + __u32 len; + void *data[0]; +}; + /** * struct ethtool_regs - hardware register dump * @cmd: Command number = %ETHTOOL_GREGS @@ -1152,6 +1178,8 @@ enum ethtool_sfeatures_retval_bits { #define ETHTOOL_GRSSH 0x00000046 /* Get RX flow hash configuration */ #define ETHTOOL_SRSSH 0x00000047 /* Set RX flow hash configuration */ +#define ETHTOOL_GTUNABLE 0x00000048 /* Get tunable configuration */ +#define ETHTOOL_STUNABLE 0x00000049 /* Set tunable configuration */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h new file mode 100644 index 000000000000..e03376de453d --- /dev/null +++ b/include/uapi/linux/fou.h @@ -0,0 +1,32 @@ +/* fou.h - FOU Interface */ + +#ifndef _UAPI_LINUX_FOU_H +#define _UAPI_LINUX_FOU_H + +/* NETLINK_GENERIC related info + */ +#define FOU_GENL_NAME "fou" +#define FOU_GENL_VERSION 0x1 + +enum { + FOU_ATTR_UNSPEC, + FOU_ATTR_PORT, /* u16 */ + FOU_ATTR_AF, /* u8 */ + FOU_ATTR_IPPROTO, /* u8 */ + + __FOU_ATTR_MAX, +}; + +#define FOU_ATTR_MAX (__FOU_ATTR_MAX - 1) + +enum { + FOU_CMD_UNSPEC, + FOU_CMD_ADD, + FOU_CMD_DEL, + + __FOU_CMD_MAX, +}; + +#define FOU_CMD_MAX (__FOU_CMD_MAX - 1) + +#endif /* _UAPI_LINUX_FOU_H */ diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 0f8210b8e0bc..aa63ed023c2b 100644 --- 
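The new uapi header deliberately defines only the deltas over classic BPF (ALU64, DW, MOV, the signed jumps, CALL/EXIT); everything else reuses the classic opcode space. A hand-encoded "return 42" under that layout, with the classic constants written numerically since they live in linux/filter.h rather than this header:

struct bpf_insn ret42[] = {
	/* 0xb7 = BPF_ALU64 | BPF_MOV | BPF_K: r0 = 42 */
	{ .code = 0xb7, .dst_reg = BPF_REG_0, .imm = 42 },
	/* 0x95 = BPF_JMP | BPF_EXIT: return r0 */
	{ .code = 0x95 },
};
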
a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -128,6 +128,7 @@ #define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ #define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */ #define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */ +#define ETH_P_XDSA 0x00F8 /* Multiplexed DSA protocol */ /* * This is an Ethernet frame header. diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index ff957604a721..c80f95f6ee78 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -215,6 +215,18 @@ enum in6_addr_gen_mode { IN6_ADDR_GEN_MODE_NONE, }; +/* Bridge section */ + +enum { + IFLA_BR_UNSPEC, + IFLA_BR_FORWARD_DELAY, + IFLA_BR_HELLO_TIME, + IFLA_BR_MAX_AGE, + __IFLA_BR_MAX, +}; + +#define IFLA_BR_MAX (__IFLA_BR_MAX - 1) + enum { BRIDGE_MODE_UNSPEC, BRIDGE_MODE_HAIRPIN, diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 3bce9e9d9f7c..7c832afdfa94 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -53,10 +53,22 @@ enum { IFLA_IPTUN_6RD_RELAY_PREFIX, IFLA_IPTUN_6RD_PREFIXLEN, IFLA_IPTUN_6RD_RELAY_PREFIXLEN, + IFLA_IPTUN_ENCAP_TYPE, + IFLA_IPTUN_ENCAP_FLAGS, + IFLA_IPTUN_ENCAP_SPORT, + IFLA_IPTUN_ENCAP_DPORT, __IFLA_IPTUN_MAX, }; #define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1) +enum tunnel_encap_types { + TUNNEL_ENCAP_NONE, + TUNNEL_ENCAP_FOU, +}; + +#define TUNNEL_ENCAP_FLAG_CSUM (1<<0) +#define TUNNEL_ENCAP_FLAG_CSUM6 (1<<1) + /* SIT-mode i_flags */ #define SIT_ISATAP 0x0001 @@ -94,6 +106,10 @@ enum { IFLA_GRE_ENCAP_LIMIT, IFLA_GRE_FLOWINFO, IFLA_GRE_FLAGS, + IFLA_GRE_ENCAP_TYPE, + IFLA_GRE_ENCAP_FLAGS, + IFLA_GRE_ENCAP_SPORT, + IFLA_GRE_ENCAP_DPORT, __IFLA_GRE_MAX, }; diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h index 1ad3659102b6..0880781ad7b6 100644 --- a/include/uapi/linux/netfilter/nf_nat.h +++ b/include/uapi/linux/netfilter/nf_nat.h @@ -13,6 +13,11 @@ #define NF_NAT_RANGE_PROTO_RANDOM_ALL \ (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY) + struct nf_nat_ipv4_range { unsigned int flags; __be32 min_ip; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 801bdd1e56e3..eeec0ae845ef 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -571,6 +571,10 @@ enum nft_exthdr_attributes { * @NFT_META_L4PROTO: layer 4 protocol number * @NFT_META_BRI_IIFNAME: packet input bridge interface name * @NFT_META_BRI_OIFNAME: packet output bridge interface name + * @NFT_META_PKTTYPE: packet type (skb->pkt_type), special handling for loopback + * @NFT_META_CPU: cpu id through smp_processor_id() + * @NFT_META_IIFGROUP: packet input interface group + * @NFT_META_OIFGROUP: packet output interface group */ enum nft_meta_keys { NFT_META_LEN, @@ -592,6 +596,10 @@ enum nft_meta_keys { NFT_META_L4PROTO, NFT_META_BRI_IIFNAME, NFT_META_BRI_OIFNAME, + NFT_META_PKTTYPE, + NFT_META_CPU, + NFT_META_IIFGROUP, + NFT_META_OIFGROUP, }; /** @@ -777,6 +785,7 @@ enum nft_nat_types { * @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers) * @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers) * @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers) + * @NFTA_NAT_FLAGS: NAT flags 
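The four IFLA_IPTUN_ENCAP_* attributes are how a tunnel's FOU encapsulation is expressed over netlink. A sketch of the kernel-side dump under that assumption (values illustrative; a zero source port conventionally lets the stack pick one per flow):

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, TUNNEL_ENCAP_FOU) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, TUNNEL_ENCAP_FLAG_CSUM) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, 0) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, htons(5555)))
		goto nla_put_failure;
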
(see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) */ enum nft_nat_attributes { NFTA_NAT_UNSPEC, @@ -786,8 +795,20 @@ enum nft_nat_attributes { NFTA_NAT_REG_ADDR_MAX, NFTA_NAT_REG_PROTO_MIN, NFTA_NAT_REG_PROTO_MAX, + NFTA_NAT_FLAGS, __NFTA_NAT_MAX }; #define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1) +/** + * enum nft_masq_attributes - nf_tables masquerade expression attributes + * + * @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32) + */ +enum nft_masq_attributes { + NFTA_MASQ_FLAGS, + __NFTA_MASQ_MAX +}; +#define NFTA_MASQ_MAX (__NFTA_MASQ_MAX - 1) + #endif /* _LINUX_NF_TABLES_H */ diff --git a/include/uapi/linux/netfilter/nfnetlink_acct.h b/include/uapi/linux/netfilter/nfnetlink_acct.h index 51404ec19022..f3e34dbbf966 100644 --- a/include/uapi/linux/netfilter/nfnetlink_acct.h +++ b/include/uapi/linux/netfilter/nfnetlink_acct.h @@ -28,9 +28,17 @@ enum nfnl_acct_type { NFACCT_USE, NFACCT_FLAGS, NFACCT_QUOTA, + NFACCT_FILTER, __NFACCT_MAX }; #define NFACCT_MAX (__NFACCT_MAX - 1) +enum nfnl_attr_filter_type { + NFACCT_FILTER_UNSPEC, + NFACCT_FILTER_MASK, + NFACCT_FILTER_VALUE, + __NFACCT_FILTER_MAX +}; +#define NFACCT_FILTER_MAX (__NFACCT_FILTER_MAX - 1) #endif /* _UAPI_NFNL_ACCT_H_ */ diff --git a/include/uapi/linux/netfilter_arp/arpt_mangle.h b/include/uapi/linux/netfilter_arp/arpt_mangle.h index 250f502902bb..8c2b16a1f5a0 100644 --- a/include/uapi/linux/netfilter_arp/arpt_mangle.h +++ b/include/uapi/linux/netfilter_arp/arpt_mangle.h @@ -13,7 +13,7 @@ struct arpt_mangle union { struct in_addr tgt_ip; } u_t; - u_int8_t flags; + __u8 flags; int target; }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f1db15b9c041..d097568da690 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3055,14 +3055,20 @@ enum nl80211_bss_scan_width { * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets) * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) + * (if @NL80211_BSS_PRESP_DATA is present then this is known to be + * from a probe response, otherwise it may be from the same beacon + * that the NL80211_BSS_BEACON_TSF will be from) * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) * @NL80211_BSS_CAPABILITY: capability field (CPU order, u16) * @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the * raw information elements from the probe response/beacon (bin); - * if the %NL80211_BSS_BEACON_IES attribute is present, the IEs here are - * from a Probe Response frame; otherwise they are from a Beacon frame. + * if the %NL80211_BSS_BEACON_IES attribute is present and the data is + * different then the IEs here are from a Probe Response frame; otherwise + * they are from a Beacon frame. * However, if the driver does not indicate the source of the IEs, these * IEs may be from either frame subtype. + * If present, the @NL80211_BSS_PRESP_DATA attribute indicates that the + * data here is known to be from a probe response, without any heuristics. 
* @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon * in mBm (100 * dBm) (s32) * @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon @@ -3074,6 +3080,10 @@ enum nl80211_bss_scan_width { * yet been received * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel * (u32, enum nl80211_bss_scan_width) + * @NL80211_BSS_BEACON_TSF: TSF of the last received beacon (u64) + * (not present if no beacon frame has been received yet) + * @NL80211_BSS_PRESP_DATA: the data in @NL80211_BSS_INFORMATION_ELEMENTS and + * @NL80211_BSS_TSF is known to be from a probe response (flag attribute) * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -3091,6 +3101,8 @@ enum nl80211_bss { NL80211_BSS_SEEN_MS_AGO, NL80211_BSS_BEACON_IES, NL80211_BSS_CHAN_WIDTH, + NL80211_BSS_BEACON_TSF, + NL80211_BSS_PRESP_DATA, /* keep last */ __NL80211_BSS_AFTER_LAST, diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index a794d1dd7b40..f7fc507d82ab 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -289,6 +289,9 @@ enum ovs_key_attr { OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */ OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */ OVS_KEY_ATTR_TCP_FLAGS, /* be16 TCP flags. */ + OVS_KEY_ATTR_DP_HASH, /* u32 hash value. Value 0 indicates the hash + is not computed by the datapath. */ + OVS_KEY_ATTR_RECIRC_ID, /* u32 recirc id */ #ifdef __KERNEL__ OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */ @@ -493,6 +496,27 @@ struct ovs_action_push_vlan { __be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */ }; +/* Data path hash algorithm for computing Datapath hash. + * + * The algorithm type only specifies the fields in a flow + * will be used as part of the hash. Each datapath is free + * to use its own hash algorithm. The hash value will be + * opaque to the user space daemon. + */ +enum ovs_hash_alg { + OVS_HASH_ALG_L4, +}; + +/* + * struct ovs_action_hash - %OVS_ACTION_ATTR_HASH action argument. + * @hash_alg: Algorithm used to compute hash prior to recirculation. + * @hash_basis: basis used for computing hash. + */ +struct ovs_action_hash { + uint32_t hash_alg; /* One of ovs_hash_alg. */ + uint32_t hash_basis; +}; + /** * enum ovs_action_attr - Action types. * @@ -521,6 +545,8 @@ enum ovs_action_attr { OVS_ACTION_ATTR_PUSH_VLAN, /* struct ovs_action_push_vlan. */ OVS_ACTION_ATTR_POP_VLAN, /* No argument. */ OVS_ACTION_ATTR_SAMPLE, /* Nested OVS_SAMPLE_ATTR_*. */ + OVS_ACTION_ATTR_RECIRC, /* u32 recirc_id. */ + OVS_ACTION_ATTR_HASH, /* struct ovs_action_hash. */ __OVS_ACTION_ATTR_MAX }; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7f0dbcbb34af..8b7002488251 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -20,8 +20,12 @@ * Andi Kleen - Fix a few bad bugs and races. 
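As the comment above notes, OVS_ACTION_ATTR_HASH pins down only which flow fields feed the hash, not the algorithm itself, so the action argument is just a selector plus a basis. For example, an L4 hash computed ahead of an OVS_ACTION_ATTR_RECIRC step would be requested as:

struct ovs_action_hash hash_act = {
	.hash_alg   = OVS_HASH_ALG_L4,	/* hash over L4 flow fields */
	.hash_basis = 0,		/* seed mixed into the hash */
};
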
* Kris Katterjohn - Added many additional checks in bpf_check_classic() */ + #include <linux/filter.h> #include <linux/skbuff.h> +#include <linux/vmalloc.h> +#include <linux/random.h> +#include <linux/moduleloader.h> #include <asm/unaligned.h> /* Registers */ @@ -63,6 +67,105 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns return NULL; } +struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) +{ + gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | + gfp_extra_flags; + struct bpf_work_struct *ws; + struct bpf_prog *fp; + + size = round_up(size, PAGE_SIZE); + fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); + if (fp == NULL) + return NULL; + + ws = kmalloc(sizeof(*ws), GFP_KERNEL | gfp_extra_flags); + if (ws == NULL) { + vfree(fp); + return NULL; + } + + fp->pages = size / PAGE_SIZE; + fp->work = ws; + + return fp; +} +EXPORT_SYMBOL_GPL(bpf_prog_alloc); + +struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, + gfp_t gfp_extra_flags) +{ + gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | + gfp_extra_flags; + struct bpf_prog *fp; + + BUG_ON(fp_old == NULL); + + size = round_up(size, PAGE_SIZE); + if (size <= fp_old->pages * PAGE_SIZE) + return fp_old; + + fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); + if (fp != NULL) { + memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); + fp->pages = size / PAGE_SIZE; + + /* We keep fp->work from fp_old around in the new + * reallocated structure. + */ + fp_old->work = NULL; + __bpf_prog_free(fp_old); + } + + return fp; +} +EXPORT_SYMBOL_GPL(bpf_prog_realloc); + +void __bpf_prog_free(struct bpf_prog *fp) +{ + kfree(fp->work); + vfree(fp); +} +EXPORT_SYMBOL_GPL(__bpf_prog_free); + +#ifdef CONFIG_BPF_JIT +struct bpf_binary_header * +bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, + unsigned int alignment, + bpf_jit_fill_hole_t bpf_fill_ill_insns) +{ + struct bpf_binary_header *hdr; + unsigned int size, hole, start; + + /* Most of BPF filters are really small, but if some of them + * fill a page, allow at least 128 extra bytes to insert a + * random section of illegal instructions. + */ + size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); + hdr = module_alloc(size); + if (hdr == NULL) + return NULL; + + /* Fill space with illegal/arch-dep instructions. */ + bpf_fill_ill_insns(hdr, size); + + hdr->pages = size / PAGE_SIZE; + hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), + PAGE_SIZE - sizeof(*hdr)); + start = (prandom_u32() % hole) & ~(alignment - 1); + + /* Leave a random number of instructions before BPF code. */ + *image_ptr = &hdr->image[start]; + + return hdr; +} + +void bpf_jit_binary_free(struct bpf_binary_header *hdr) +{ + module_free(NULL, hdr); +} +#endif /* CONFIG_BPF_JIT */ + /* Base function for offset calculation. Needs to go into .text section, * therefore keeping it non-static as well; will also be used by JITs * anyway later on, so do not let the compiler omit it. 
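bpf_prog_alloc() hands back a vmalloc'ed, page-rounded image precisely so the whole program can later be flipped read-only, and bpf_jit_binary_alloc() leaves a randomly placed run of illegal instructions ahead of the JIT image to make its start address unpredictable. A sketch of the allocation lifecycle as a caller sees it (len and the fill step are illustrative):

	struct bpf_prog *fp;

	fp = bpf_prog_alloc(bpf_prog_size(len), __GFP_NOWARN);
	if (!fp)
		return -ENOMEM;

	fp->len = len;
	/* ... fill fp->insnsi[0 .. len-1], then: ... */
	bpf_prog_select_runtime(fp);	/* JIT probe + lock read-only */

	/* teardown is deferred through fp->work, see below: */
	bpf_prog_free(fp);
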
@@ -180,6 +283,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn) [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W, [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H, [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B, + [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW, }; void *ptr; int off; @@ -239,6 +343,10 @@ select_insn: ALU64_MOV_K: DST = IMM; CONT; + LD_IMM_DW: + DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; + insn++; + CONT; ALU64_ARSH_X: (*(s64 *) &DST) >>= SRC; CONT; @@ -523,12 +631,26 @@ void bpf_prog_select_runtime(struct bpf_prog *fp) /* Probe if internal BPF can be JITed */ bpf_int_jit_compile(fp); + /* Lock whole bpf_prog as read-only */ + bpf_prog_lock_ro(fp); } EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); -/* free internal BPF program */ +static void bpf_prog_free_deferred(struct work_struct *work) +{ + struct bpf_work_struct *ws; + + ws = container_of(work, struct bpf_work_struct, work); + bpf_jit_free(ws->prog); +} + +/* Free internal BPF program */ void bpf_prog_free(struct bpf_prog *fp) { - bpf_jit_free(fp); + struct bpf_work_struct *ws = fp->work; + + INIT_WORK(&ws->work, bpf_prog_free_deferred); + ws->prog = fp; + schedule_work(&ws->work); } EXPORT_SYMBOL_GPL(bpf_prog_free); diff --git a/kernel/crash_dump.c b/kernel/crash_dump.c index c766ee54c0b1..b64e238b553b 100644 --- a/kernel/crash_dump.c +++ b/kernel/crash_dump.c @@ -18,6 +18,7 @@ unsigned long saved_max_pfn; * it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE. */ unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; +EXPORT_SYMBOL_GPL(elfcorehdr_addr); /* * stores the size of elf header of crash image diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 44eb005c6695..84922befea84 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -395,16 +395,15 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog) if (!filter) goto free_prog; - filter->prog = kzalloc(bpf_prog_size(new_len), - GFP_KERNEL|__GFP_NOWARN); + filter->prog = bpf_prog_alloc(bpf_prog_size(new_len), __GFP_NOWARN); if (!filter->prog) goto free_filter; ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len); if (ret) goto free_filter_prog; - kfree(fp); + kfree(fp); atomic_set(&filter->usage, 1); filter->prog->len = new_len; @@ -413,7 +412,7 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog) return filter; free_filter_prog: - kfree(filter->prog); + __bpf_prog_free(filter->prog); free_filter: kfree(filter); free_prog: diff --git a/lib/random32.c b/lib/random32.c index c9b6bf3afe0c..0bee183fa18f 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -37,6 +37,7 @@ #include <linux/jiffies.h> #include <linux/random.h> #include <linux/sched.h> +#include <asm/unaligned.h> #ifdef CONFIG_RANDOM32_SELFTEST static void __init prandom_state_selftest(void); @@ -96,27 +97,23 @@ EXPORT_SYMBOL(prandom_u32); * This is used for pseudo-randomness with no outside seeding. * For more random results, use prandom_bytes(). 
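LD_IMM_DW is the one variable-length case in the interpreter: a BPF_LD | BPF_IMM | BPF_DW instruction claims two slots, and the second slot contributes only its imm field. Spelled out, the pair below loads r1 = 0x567800001234, matching the "load 64-bit immediate" case added to lib/test_bpf.c further down:

const struct bpf_insn ld_imm64_r1[2] = {
	{ .code    = BPF_LD | BPF_IMM | BPF_DW,
	  .dst_reg = BPF_REG_1,
	  .imm     = 0x00001234 },	/* low 32 bits  */
	{ .imm     = 0x00005678 },	/* high 32 bits */
};
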
*/ -void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes) +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes) { - unsigned char *p = buf; - int i; - - for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) { - u32 random = prandom_u32_state(state); - int j; + u8 *ptr = buf; - for (j = 0; j < sizeof(u32); j++) { - p[i + j] = random; - random >>= BITS_PER_BYTE; - } + while (bytes >= sizeof(u32)) { + put_unaligned(prandom_u32_state(state), (u32 *) ptr); + ptr += sizeof(u32); + bytes -= sizeof(u32); } - if (i < bytes) { - u32 random = prandom_u32_state(state); - for (; i < bytes; i++) { - p[i] = random; - random >>= BITS_PER_BYTE; - } + if (bytes > 0) { + u32 rem = prandom_u32_state(state); + do { + *ptr++ = (u8) rem; + bytes--; + rem >>= BITS_PER_BYTE; + } while (bytes > 0); } } EXPORT_SYMBOL(prandom_bytes_state); @@ -126,7 +123,7 @@ EXPORT_SYMBOL(prandom_bytes_state); * @buf: where to copy the pseudo-random bytes to * @bytes: the requested number of bytes */ -void prandom_bytes(void *buf, int bytes) +void prandom_bytes(void *buf, size_t bytes) { struct rnd_state *state = &get_cpu_var(net_rand_state); @@ -137,7 +134,7 @@ EXPORT_SYMBOL(prandom_bytes); static void prandom_warmup(struct rnd_state *state) { - /* Calling RNG ten times to satify recurrence condition */ + /* Calling RNG ten times to satisfy recurrence condition */ prandom_u32_state(state); prandom_u32_state(state); prandom_u32_state(state); @@ -152,7 +149,7 @@ static void prandom_warmup(struct rnd_state *state) static u32 __extract_hwseed(void) { - u32 val = 0; + unsigned int val = 0; (void)(arch_get_random_seed_int(&val) || arch_get_random_int(&val)); @@ -228,7 +225,7 @@ static void __prandom_timer(unsigned long dontcare) prandom_seed(entropy); /* reseed every ~60 seconds, in [40 .. 
80) interval with slack */ - expires = 40 + (prandom_u32() % 40); + expires = 40 + prandom_u32_max(40); seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC); add_timer(&seed_timer); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 7b36e4d40ed7..25f14c586ad7 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -297,7 +297,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags) ASSERT_RHT_MUTEX(ht); - if (tbl->size <= HASH_MIN_SIZE) + if (ht->shift <= ht->p.min_shift) return 0; ntbl = bucket_table_alloc(tbl->size / 2, flags); @@ -505,9 +505,10 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, } EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); -static size_t rounded_hashtable_size(unsigned int nelem) +static size_t rounded_hashtable_size(struct rhashtable_params *params) { - return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE); + return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), + 1UL << params->min_shift); } /** @@ -565,8 +566,11 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) (!params->key_len && !params->obj_hashfn)) return -EINVAL; + params->min_shift = max_t(size_t, params->min_shift, + ilog2(HASH_MIN_SIZE)); + if (params->nelem_hint) - size = rounded_hashtable_size(params->nelem_hint); + size = rounded_hashtable_size(params); tbl = bucket_table_alloc(size, GFP_KERNEL); if (tbl == NULL) diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 89e0345733bd..23e070bcf72d 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -1342,6 +1342,44 @@ static struct bpf_test tests[] = { { { 0, -1 } } }, { + "INT: shifts by register", + .u.insns_int = { + BPF_MOV64_IMM(R0, -1234), + BPF_MOV64_IMM(R1, 1), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R2, 1), + BPF_ALU64_REG(BPF_LSH, R0, R2), + BPF_MOV32_IMM(R4, -1234), + BPF_JMP_REG(BPF_JEQ, R0, R4, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_AND, R4, 63), + BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */ + BPF_MOV64_IMM(R3, 47), + BPF_ALU64_REG(BPF_ARSH, R0, R3), + BPF_JMP_IMM(BPF_JEQ, R0, -617, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R2, 1), + BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */ + BPF_JMP_IMM(BPF_JEQ, R4, 92, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R4, 4), + BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */ + BPF_JMP_IMM(BPF_JEQ, R4, 64, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R4, 5), + BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */ + BPF_JMP_IMM(BPF_JEQ, R4, 160, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(R0, -1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } } + }, + { "INT: DIV + ABS", .u.insns_int = { BPF_ALU64_REG(BPF_MOV, R6, R1), @@ -1697,6 +1735,27 @@ static struct bpf_test tests[] = { { }, { { 1, 0 } }, }, + { + "load 64-bit immediate", + .u.insns_int = { + BPF_LD_IMM64(R1, 0x567800001234LL), + BPF_MOV64_REG(R2, R1), + BPF_MOV64_REG(R3, R2), + BPF_ALU64_IMM(BPF_RSH, R2, 32), + BPF_ALU64_IMM(BPF_LSH, R3, 32), + BPF_ALU64_IMM(BPF_RSH, R3, 32), + BPF_ALU64_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } } + }, }; static struct net_device dev; @@ -1798,7 +1857,7 @@ static struct bpf_prog *generate_filter(int which, int *err) break; case INTERNAL: - fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL); + fp = bpf_prog_alloc(bpf_prog_size(flen), 0); if (fp == NULL) { pr_cont("UNEXPECTED_FAIL no 
memory left\n"); *err = -ENOMEM; @@ -1835,7 +1894,7 @@ static int __run_one(const struct bpf_prog *fp, const void *data, int runs, u64 *duration) { u64 start, finish; - int ret, i; + int ret = 0, i; start = ktime_to_us(ktime_get()); diff --git a/net/atm/clip.c b/net/atm/clip.c index 46339040fef0..1d9eaa4f041a 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -384,7 +384,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb, pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev); old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */ if (old) { - pr_warning("XOFF->XOFF transition\n"); + pr_warn("XOFF->XOFF transition\n"); goto out_release_neigh; } dev->stats.tx_packets++; @@ -447,7 +447,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) struct rtable *rt; if (vcc->push != clip_push) { - pr_warning("non-CLIP VCC\n"); + pr_warn("non-CLIP VCC\n"); return -EBADF; } clip_vcc = CLIP_VCC(vcc); diff --git a/net/atm/common.c b/net/atm/common.c index 7b491006eaf4..6a765156a3f6 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -300,7 +300,7 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal) max_sdu = ATM_MAX_AAL34_PDU; break; default: - pr_warning("AAL problems ... (%d)\n", aal); + pr_warn("AAL problems ... (%d)\n", aal); /* fall through */ case ATM_AAL5: max_sdu = ATM_MAX_AAL5_PDU; diff --git a/net/atm/mpc.c b/net/atm/mpc.c index e8e0e7a8a23d..0e982222d425 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -599,7 +599,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb, } non_ip: - return mpc->old_ops->ndo_start_xmit(skb, dev); + return __netdev_start_xmit(mpc->old_ops, skb, dev, false); } static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 206b65ccd5b8..35ebe79c87b0 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -772,16 +772,16 @@ static inline void chan_ready_cb(struct l2cap_chan *chan) ifup(dev->netdev); } -static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *chan) +static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan) { - struct l2cap_chan *pchan; + struct l2cap_chan *chan; - pchan = chan_open(chan); - pchan->ops = chan->ops; + chan = chan_open(pchan); + chan->ops = pchan->ops; BT_DBG("chan %p pchan %p", chan, pchan); - return pchan; + return chan; } static void delete_netdev(struct work_struct *work) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 1d9c29a00568..9b7145959a49 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1898,6 +1898,8 @@ static int __hci_init(struct hci_dev *hdev) debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs, &hdev->discov_interleaved_timeout); + + smp_register(hdev); } return 0; @@ -3238,7 +3240,7 @@ struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) } list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { - if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) { + if (smp_irk_matches(hdev, irk->val, rpa)) { bacpy(&irk->rpa, rpa); return irk; } @@ -3892,7 +3894,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy, !bacmp(&hdev->random_addr, &hdev->rpa)) return 0; - err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa); + err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); if (err < 0) { BT_ERR("%s failed to generate new RPA", hdev->name); return err; @@ -4100,18 +4102,9 @@ int hci_register_dev(struct hci_dev *hdev) dev_set_name(&hdev->dev, 
"%s", hdev->name); - hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(hdev->tfm_aes)) { - BT_ERR("Unable to create crypto context"); - error = PTR_ERR(hdev->tfm_aes); - hdev->tfm_aes = NULL; - goto err_wqueue; - } - error = device_add(&hdev->dev); if (error < 0) - goto err_tfm; + goto err_wqueue; hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, @@ -4153,8 +4146,6 @@ int hci_register_dev(struct hci_dev *hdev) return id; -err_tfm: - crypto_free_blkcipher(hdev->tfm_aes); err_wqueue: destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); @@ -4206,8 +4197,7 @@ void hci_unregister_dev(struct hci_dev *hdev) rfkill_destroy(hdev->rfkill); } - if (hdev->tfm_aes) - crypto_free_blkcipher(hdev->tfm_aes); + smp_unregister(hdev); device_del(&hdev->dev); @@ -5690,3 +5680,52 @@ void hci_update_background_scan(struct hci_dev *hdev) if (err) BT_ERR("Failed to run HCI request: err %d", err); } + +static bool disconnected_whitelist_entries(struct hci_dev *hdev) +{ + struct bdaddr_list *b; + + list_for_each_entry(b, &hdev->whitelist, list) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); + if (!conn) + return true; + + if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) + return true; + } + + return false; +} + +void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req) +{ + u8 scan; + + if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) + return; + + if (!hdev_is_powered(hdev)) + return; + + if (mgmt_powering_down(hdev)) + return; + + if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) || + disconnected_whitelist_entries(hdev)) + scan = SCAN_PAGE; + else + scan = SCAN_DISABLED; + + if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE)) + return; + + if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) + scan |= SCAN_INQUIRY; + + if (req) + hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); + else + hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); +} diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index a6000823f0ff..3a99f30a3317 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2071,6 +2071,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) cp.handle = ev->handle; hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, sizeof(cp), &cp); + + hci_update_page_scan(hdev, NULL); } /* Set packet type for incoming connection */ @@ -2247,9 +2249,12 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, reason, mgmt_connected); - if (conn->type == ACL_LINK && - test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) - hci_remove_link_key(hdev, &conn->dst); + if (conn->type == ACL_LINK) { + if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + hci_remove_link_key(hdev, &conn->dst); + + hci_update_page_scan(hdev, NULL); + } params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); if (params) { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 46547b920f88..4a90438d99df 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -210,6 +210,10 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) { write_lock(&chan_list_lock); + /* Override the defaults (which are for conn-oriented) */ + chan->omtu = L2CAP_DEFAULT_MTU; + chan->chan_type = L2CAP_CHAN_FIXED; + chan->scid = scid; write_unlock(&chan_list_lock); @@ -562,6 +566,8 @@ void 
l2cap_chan_del(struct l2cap_chan *chan, int err) BT_DBG("chan %p, conn %p, err %d", chan, conn, err); + chan->ops->teardown(chan, err); + if (conn) { struct amp_mgr *mgr = conn->hcon->amp_mgr; /* Delete from channel list */ @@ -585,8 +591,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) amp_disconnect_logical_link(hs_hchan); } - chan->ops->teardown(chan, err); - if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) return; @@ -1082,6 +1086,9 @@ static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) { + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) + return true; + return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); } @@ -1417,71 +1424,18 @@ static void l2cap_conn_start(struct l2cap_conn *conn) mutex_unlock(&conn->chan_lock); } -/* Find socket with cid and source/destination bdaddr. - * Returns closest match, locked. - */ -static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid, - bdaddr_t *src, - bdaddr_t *dst) -{ - struct l2cap_chan *c, *c1 = NULL; - - read_lock(&chan_list_lock); - - list_for_each_entry(c, &chan_list, global_l) { - if (state && c->state != state) - continue; - - if (c->scid == cid) { - int src_match, dst_match; - int src_any, dst_any; - - /* Exact match. */ - src_match = !bacmp(&c->src, src); - dst_match = !bacmp(&c->dst, dst); - if (src_match && dst_match) { - read_unlock(&chan_list_lock); - return c; - } - - /* Closest match */ - src_any = !bacmp(&c->src, BDADDR_ANY); - dst_any = !bacmp(&c->dst, BDADDR_ANY); - if ((src_match && dst_any) || (src_any && dst_match) || - (src_any && dst_any)) - c1 = c; - } - } - - read_unlock(&chan_list_lock); - - return c1; -} - static void l2cap_le_conn_ready(struct l2cap_conn *conn) { struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; - struct l2cap_chan *chan, *pchan; - u8 dst_type; - BT_DBG(""); + BT_DBG("%s conn %p", hdev->name, conn); - /* Check if we have socket listening on cid */ - pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT, - &hcon->src, &hcon->dst); - if (!pchan) - return; - - /* Client ATT sockets should override the server one */ - if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT)) - return; - - dst_type = bdaddr_type(hcon, hcon->dst_type); - - /* If device is blocked, do not create a channel for it */ - if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type)) - return; + /* For outgoing pairing which doesn't necessarily have an + * associated socket (e.g. mgmt_pair_device). + */ + if (hcon->out) + smp_conn_security(hcon, hcon->pending_sec_level); /* For LE slave connections, make sure the connection interval * is in the range of the minium and maximum interval that has @@ -1501,22 +1455,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req); } - - l2cap_chan_lock(pchan); - - chan = pchan->ops->new_connection(pchan); - if (!chan) - goto clean; - - bacpy(&chan->src, &hcon->src); - bacpy(&chan->dst, &hcon->dst); - chan->src_type = bdaddr_type(hcon, hcon->src_type); - chan->dst_type = dst_type; - - __l2cap_chan_add(conn, chan); - -clean: - l2cap_chan_unlock(pchan); } static void l2cap_conn_ready(struct l2cap_conn *conn) @@ -1526,17 +1464,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) BT_DBG("conn %p", conn); - /* For outgoing pairing which doesn't necessarily have an - * associated socket (e.g. mgmt_pair_device). 
- */ - if (hcon->out && hcon->type == LE_LINK) - smp_conn_security(hcon, hcon->pending_sec_level); - mutex_lock(&conn->chan_lock); - if (hcon->type == LE_LINK) - l2cap_le_conn_ready(conn); - list_for_each_entry(chan, &conn->chan_l, list) { l2cap_chan_lock(chan); @@ -1560,6 +1489,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) mutex_unlock(&conn->chan_lock); + if (hcon->type == LE_LINK) + l2cap_le_conn_ready(conn); + queue_work(hcon->hdev->workqueue, &conn->pending_rx_work); } @@ -1695,6 +1627,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) if (work_pending(&conn->pending_rx_work)) cancel_work_sync(&conn->pending_rx_work); + if (work_pending(&conn->disconn_work)) + cancel_work_sync(&conn->disconn_work); + l2cap_unregister_all_users(conn); mutex_lock(&conn->chan_lock); @@ -1719,27 +1654,29 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) cancel_delayed_work_sync(&conn->info_timer); - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) { - cancel_delayed_work_sync(&conn->security_timer); - smp_chan_destroy(conn); - } - hcon->l2cap_data = NULL; conn->hchan = NULL; l2cap_conn_put(conn); } -static void security_timeout(struct work_struct *work) +static void disconn_work(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, - security_timer.work); + disconn_work); BT_DBG("conn %p", conn); - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { - smp_chan_destroy(conn); - l2cap_conn_del(conn->hcon, ETIMEDOUT); - } + l2cap_conn_del(conn->hcon, conn->disconn_err); +} + +void l2cap_conn_shutdown(struct l2cap_conn *conn, int err) +{ + struct hci_dev *hdev = conn->hcon->hdev; + + BT_DBG("conn %p err %d", conn, err); + + conn->disconn_err = err; + queue_work(hdev->workqueue, &conn->disconn_work); } static void l2cap_conn_free(struct kref *ref) @@ -1794,6 +1731,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, src_match = !bacmp(&c->src, src); dst_match = !bacmp(&c->dst, dst); if (src_match && dst_match) { + l2cap_chan_hold(c); read_unlock(&chan_list_lock); return c; } @@ -1807,6 +1745,9 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, } } + if (c1) + l2cap_chan_hold(c1); + read_unlock(&chan_list_lock); return c1; @@ -2027,10 +1968,12 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan) tx_skb->data + L2CAP_HDR_SIZE); } + /* Update FCS */ if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len); - put_unaligned_le16(fcs, skb_put(tx_skb, - L2CAP_FCS_SIZE)); + u16 fcs = crc16(0, (u8 *) tx_skb->data, + tx_skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) - + L2CAP_FCS_SIZE); } l2cap_do_send(chan, tx_skb); @@ -2334,7 +2277,6 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, } else { sar = L2CAP_SAR_START; sdu_len = len; - pdu_len -= L2CAP_SDULEN_SIZE; } while (len > 0) { @@ -2349,10 +2291,8 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, __skb_queue_tail(seg_queue, skb); len -= pdu_len; - if (sdu_len) { + if (sdu_len) sdu_len = 0; - pdu_len += L2CAP_SDULEN_SIZE; - } if (len <= pdu_len) { sar = L2CAP_SAR_END; @@ -3884,6 +3824,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, response: l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); + l2cap_chan_put(pchan); sendresp: rsp.scid = cpu_to_le16(scid); @@ -5497,6 +5438,7 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn, response_unlock: 
l2cap_chan_unlock(pchan); mutex_unlock(&conn->chan_lock); + l2cap_chan_put(pchan); if (result == L2CAP_CR_PEND) return 0; @@ -6845,12 +6787,12 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct l2cap_chan *chan; if (hcon->type != ACL_LINK) - goto drop; + goto free_skb; chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst, ACL_LINK); if (!chan) - goto drop; + goto free_skb; BT_DBG("chan %p, len %d", chan, skb->len); @@ -6864,36 +6806,14 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, bacpy(&bt_cb(skb)->bdaddr, &hcon->dst); bt_cb(skb)->psm = psm; - if (!chan->ops->recv(chan, skb)) - return; - -drop: - kfree_skb(skb); -} - -static void l2cap_att_channel(struct l2cap_conn *conn, - struct sk_buff *skb) -{ - struct hci_conn *hcon = conn->hcon; - struct l2cap_chan *chan; - - if (hcon->type != LE_LINK) - goto drop; - - chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT, - &hcon->src, &hcon->dst); - if (!chan) - goto drop; - - BT_DBG("chan %p, len %d", chan, skb->len); - - if (chan->imtu < skb->len) - goto drop; - - if (!chan->ops->recv(chan, skb)) + if (!chan->ops->recv(chan, skb)) { + l2cap_chan_put(chan); return; + } drop: + l2cap_chan_put(chan); +free_skb: kfree_skb(skb); } @@ -6942,19 +6862,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) l2cap_conless_channel(conn, psm, skb); break; - case L2CAP_CID_ATT: - l2cap_att_channel(conn, skb); - break; - case L2CAP_CID_LE_SIGNALING: l2cap_le_sig_channel(conn, skb); break; - case L2CAP_CID_SMP: - if (smp_sig_channel(conn, skb)) - l2cap_conn_del(conn->hcon, EACCES); - break; - default: l2cap_data_channel(conn, cid, skb); break; @@ -7023,10 +6934,9 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) INIT_LIST_HEAD(&conn->chan_l); INIT_LIST_HEAD(&conn->users); - if (hcon->type == LE_LINK) - INIT_DELAYED_WORK(&conn->security_timer, security_timeout); - else - INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); + INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); + + INIT_WORK(&conn->disconn_work, disconn_work); skb_queue_head_init(&conn->pending_rx); INIT_WORK(&conn->pending_rx_work, process_pending_rx); @@ -7239,19 +7149,99 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) return exact ? lm1 : lm2; } +/* Find the next fixed channel in BT_LISTEN state, continue iteration + * from an existing channel in the list or from the beginning of the + * global list (by passing NULL as first parameter). 
+ */ +static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, + bdaddr_t *src, u8 link_type) +{ + read_lock(&chan_list_lock); + + if (c) + c = list_next_entry(c, global_l); + else + c = list_entry(chan_list.next, typeof(*c), global_l); + + list_for_each_entry_from(c, &chan_list, global_l) { + if (c->chan_type != L2CAP_CHAN_FIXED) + continue; + if (c->state != BT_LISTEN) + continue; + if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY)) + continue; + if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR) + continue; + if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) + continue; + + l2cap_chan_hold(c); + read_unlock(&chan_list_lock); + return c; + } + + read_unlock(&chan_list_lock); + + return NULL; +} + void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { + struct hci_dev *hdev = hcon->hdev; struct l2cap_conn *conn; + struct l2cap_chan *pchan; + u8 dst_type; BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); - if (!status) { - conn = l2cap_conn_add(hcon); - if (conn) - l2cap_conn_ready(conn); - } else { + if (status) { l2cap_conn_del(hcon, bt_to_errno(status)); + return; } + + conn = l2cap_conn_add(hcon); + if (!conn) + return; + + dst_type = bdaddr_type(hcon, hcon->dst_type); + + /* If device is blocked, do not create channels for it */ + if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type)) + return; + + /* Find fixed channels and notify them of the new connection. We + * use multiple individual lookups, continuing each time where + * we left off, because the list lock would prevent calling the + * potentially sleeping l2cap_chan_lock() function. + */ + pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type); + while (pchan) { + struct l2cap_chan *chan, *next; + + /* Client fixed channels should override server ones */ + if (__l2cap_get_chan_by_dcid(conn, pchan->scid)) + goto next; + + l2cap_chan_lock(pchan); + chan = pchan->ops->new_connection(pchan); + if (chan) { + bacpy(&chan->src, &hcon->src); + bacpy(&chan->dst, &hcon->dst); + chan->src_type = bdaddr_type(hcon, hcon->src_type); + chan->dst_type = dst_type; + + __l2cap_chan_add(conn, chan); + } + + l2cap_chan_unlock(pchan); +next: + next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr, + hcon->type); + l2cap_chan_put(pchan); + pchan = next; + } + + l2cap_conn_ready(conn); } int l2cap_disconn_ind(struct hci_conn *hcon) @@ -7299,12 +7289,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); - if (hcon->type == LE_LINK) { - if (!status && encrypt) - smp_distribute_keys(conn); - cancel_delayed_work(&conn->security_timer); - } - mutex_lock(&conn->chan_lock); list_for_each_entry(chan, &conn->chan_l, list) { @@ -7318,15 +7302,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) continue; } - if (chan->scid == L2CAP_CID_ATT) { - if (!status && encrypt) { - chan->sec_level = hcon->sec_level; - l2cap_chan_ready(chan); - } - - l2cap_chan_unlock(chan); - continue; - } + if (!status && encrypt) + chan->sec_level = hcon->sec_level; if (!__l2cap_no_conn_pending(chan)) { l2cap_chan_unlock(chan); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 1884f72083c2..ed06f88e6f10 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -99,15 +99,6 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) if (!bdaddr_type_is_valid(la.l2_bdaddr_type)) return -EINVAL; - if (la.l2_cid) { - /* When the socket gets 
created it defaults to - * CHAN_CONN_ORIENTED, so we need to overwrite the - * default here. - */ - chan->chan_type = L2CAP_CHAN_FIXED; - chan->omtu = L2CAP_DEFAULT_MTU; - } - if (bdaddr_type_is_le(la.l2_bdaddr_type)) { /* We only allow ATT user space socket */ if (la.l2_cid && @@ -790,6 +781,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, if (chan->scid == L2CAP_CID_ATT) { if (smp_conn_security(conn->hcon, sec.level)) break; + set_bit(FLAG_PENDING_SECURITY, &chan->flags); sk->sk_state = BT_CONFIG; chan->state = BT_CONFIG; @@ -1359,6 +1351,11 @@ static void l2cap_sock_resume_cb(struct l2cap_chan *chan) { struct sock *sk = chan->data; + if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags)) { + sk->sk_state = BT_CONNECTED; + chan->state = BT_CONNECTED; + } + clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); sk->sk_state_change(sk); } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index b8554d429d88..c2457435a670 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -129,9 +129,6 @@ static const u16 mgmt_events[] = { #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) -#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \ - !test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) - struct pending_cmd { struct list_head list; u16 opcode; @@ -1536,9 +1533,11 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status) /* When the discoverable mode gets changed, make sure * that class of device has the limited discoverable - * bit correctly set. + * bit correctly set. Also update page scan based on whitelist + * entries. */ hci_req_init(&req, hdev); + hci_update_page_scan(hdev, &req); update_class(&req); hci_req_run(&req, NULL); @@ -1785,6 +1784,7 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status) if (conn_changed || discov_changed) { new_settings(hdev, cmd->sk); + hci_update_page_scan(hdev, NULL); if (discov_changed) mgmt_update_adv_data(hdev); hci_update_background_scan(hdev); @@ -1818,6 +1818,7 @@ static int set_connectable_update_settings(struct hci_dev *hdev, return err; if (changed) { + hci_update_page_scan(hdev, NULL); hci_update_background_scan(hdev); return new_settings(hdev, sk); } @@ -4381,27 +4382,6 @@ unlock: return err; } -static void set_bredr_scan(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - u8 scan = 0; - - /* Ensure that fast connectable is disabled. This function will - * not do anything if the page scan parameters are already what - * they should be. - */ - write_fast_connectable(req, false); - - if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) || - !list_empty(&hdev->whitelist)) - scan |= SCAN_PAGE; - if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) - scan |= SCAN_INQUIRY; - - if (scan) - hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); -} - static void set_bredr_complete(struct hci_dev *hdev, u8 status) { struct pending_cmd *cmd; @@ -4507,9 +4487,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) hci_req_init(&req, hdev); - if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) || - !list_empty(&hdev->whitelist)) - set_bredr_scan(&req); + write_fast_connectable(&req, false); + hci_update_page_scan(hdev, &req); /* Since only the advertising data flags will change, there * is no need to update the scan response data. 
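Several hunks in this file drop open-coded scan-enable computations (set_bredr_scan() above, update_page_scan() in the next hunk) in favour of a single hci_update_page_scan() helper whose definition is not part of this file. A plausible sketch, reconstructed from the logic of the two removed helpers (the name, placement and exact set of checks are assumptions about code living elsewhere in the series):

/* Centralized page/inquiry scan update: derive the scan byte from the
 * current device state instead of having every caller compute it. When
 * called without a request, the HCI command is sent directly.
 */
void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
	u8 scan;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    !list_empty(&hdev->whitelist))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (req)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	else
		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

The call sites match this split: callers that batch HCI commands pass their struct hci_request (set_bredr, powered_update_hci), while the Add/Remove Device and connectable paths pass NULL to send the command immediately.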
@@ -5235,27 +5214,6 @@ unlock: return err; } -/* Helper for Add/Remove Device commands */ -static void update_page_scan(struct hci_dev *hdev, u8 scan) -{ - if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) - return; - - if (!hdev_is_powered(hdev)) - return; - - /* If HCI_CONNECTABLE is set then Add/Remove Device should not - * make any changes to page scanning. - */ - if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) - return; - - if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) - scan |= SCAN_INQUIRY; - - hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); -} - static void device_added(struct sock *sk, struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type, u8 action) { @@ -5291,8 +5249,6 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); if (cp->addr.type == BDADDR_BREDR) { - bool update_scan; - /* Only incoming connections action is supported for now */ if (cp->action != 0x01) { err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, @@ -5301,15 +5257,12 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - update_scan = list_empty(&hdev->whitelist); - err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr, cp->addr.type); if (err) goto unlock; - if (update_scan) - update_page_scan(hdev, SCAN_PAGE); + hci_update_page_scan(hdev, NULL); goto added; } @@ -5392,8 +5345,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (list_empty(&hdev->whitelist)) - update_page_scan(hdev, SCAN_DISABLED); + hci_update_page_scan(hdev, NULL); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); @@ -5444,7 +5396,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, kfree(b); } - update_page_scan(hdev, SCAN_DISABLED); + hci_update_page_scan(hdev, NULL); list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) { if (p->auto_connect == HCI_AUTO_CONN_DISABLED) @@ -5969,8 +5921,8 @@ static int powered_update_hci(struct hci_dev *hdev) sizeof(link_sec), &link_sec); if (lmp_bredr_capable(hdev)) { - if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) - set_bredr_scan(&req); + write_fast_connectable(&req, false); + hci_update_page_scan(hdev, &req); update_class(&req); update_name(&req); update_eir(&req); @@ -6281,25 +6233,35 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_remove(cmd); } +bool mgmt_powering_down(struct hci_dev *hdev) +{ + struct pending_cmd *cmd; + struct mgmt_mode *cp; + + cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); + if (!cmd) + return false; + + cp = cmd->param; + if (!cp->val) + return true; + + return false; +} + void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected) { struct mgmt_ev_device_disconnected ev; - struct pending_cmd *power_off; struct sock *sk = NULL; - power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); - if (power_off) { - struct mgmt_mode *cp = power_off->param; - - /* The connection is still in hci_conn_hash so test for 1 - * instead of 0 to know if this is the last one. - */ - if (!cp->val && hci_conn_count(hdev) == 1) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); - } + /* The connection is still in hci_conn_hash so test for 1 + * instead of 0 to know if this is the last one. 
+ */ + if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); } if (!mgmt_connected) @@ -6359,19 +6321,13 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { struct mgmt_ev_connect_failed ev; - struct pending_cmd *power_off; - - power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev); - if (power_off) { - struct mgmt_mode *cp = power_off->param; - /* The connection is still in hci_conn_hash so test for 1 - * instead of 0 to know if this is the last one. - */ - if (!cp->val && hci_conn_count(hdev) == 1) { - cancel_delayed_work(&hdev->power_off); - queue_work(hdev->req_workqueue, &hdev->power_off.work); - } + /* The connection is still in hci_conn_hash so test for 1 + * instead of 0 to know if this is the last one. + */ + if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); } bacpy(&ev.addr.bdaddr, bdaddr); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index fd3294300803..07ca4ce0943b 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -44,7 +44,10 @@ enum { }; struct smp_chan { - struct l2cap_conn *conn; + struct l2cap_conn *conn; + struct delayed_work security_timer; + struct work_struct distribute_work; + u8 preq[7]; /* SMP Pairing Request */ u8 prsp[7]; /* SMP Pairing Response */ u8 prnd[16]; /* SMP Pairing Random (local) */ @@ -139,12 +142,18 @@ static int smp_ah(struct crypto_blkcipher *tfm, u8 irk[16], u8 r[3], u8 res[3]) return 0; } -bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], - bdaddr_t *bdaddr) +bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr) { + struct l2cap_chan *chan = hdev->smp_data; + struct crypto_blkcipher *tfm; u8 hash[3]; int err; + if (!chan || !chan->data) + return false; + + tfm = chan->data; + BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk); err = smp_ah(tfm, irk, &bdaddr->b[3], hash); @@ -154,10 +163,17 @@ bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], return !memcmp(bdaddr->b, hash, 3); } -int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa) +int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa) { + struct l2cap_chan *chan = hdev->smp_data; + struct crypto_blkcipher *tfm; int err; + if (!chan || !chan->data) + return -EOPNOTSUPP; + + tfm = chan->data; + get_random_bytes(&rpa->b[3], 3); rpa->b[5] &= 0x3f; /* Clear two most significant bits */ @@ -235,47 +251,39 @@ static int smp_s1(struct smp_chan *smp, u8 k[16], u8 r1[16], u8 r2[16], return err; } -static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code, - u16 dlen, void *data) +static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) { - struct sk_buff *skb; - struct l2cap_hdr *lh; - int len; - - len = L2CAP_HDR_SIZE + sizeof(code) + dlen; - - if (len > conn->mtu) - return NULL; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp; + struct kvec iv[2]; + struct msghdr msg; - skb = bt_skb_alloc(len, GFP_ATOMIC); - if (!skb) - return NULL; + if (!chan) + return; - lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); - lh->len = cpu_to_le16(sizeof(code) + dlen); - lh->cid = cpu_to_le16(L2CAP_CID_SMP); + BT_DBG("code 0x%2.2x", code); - memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code)); + iv[0].iov_base = &code; + iv[0].iov_len = 1; - memcpy(skb_put(skb, dlen), data, dlen); + 
iv[1].iov_base = data; + iv[1].iov_len = len; - return skb; -} + memset(&msg, 0, sizeof(msg)); -static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) -{ - struct sk_buff *skb = smp_build_cmd(conn, code, len, data); + msg.msg_iov = (struct iovec *) &iv; + msg.msg_iovlen = 2; - BT_DBG("code 0x%2.2x", code); + l2cap_chan_send(chan, &msg, 1 + len); - if (!skb) + if (!chan->data) return; - skb->priority = HCI_PRIO_MAX; - hci_send_acl(conn->hchan, skb, 0); + smp = chan->data; - cancel_delayed_work_sync(&conn->security_timer); - schedule_delayed_work(&conn->security_timer, SMP_TIMEOUT); + cancel_delayed_work_sync(&smp->security_timer); + if (test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) + schedule_delayed_work(&smp->security_timer, SMP_TIMEOUT); } static __u8 authreq_to_seclevel(__u8 authreq) @@ -302,7 +310,8 @@ static void build_pairing_cmd(struct l2cap_conn *conn, struct smp_cmd_pairing *req, struct smp_cmd_pairing *rsp, __u8 authreq) { - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; u8 local_dist = 0, remote_dist = 0; @@ -345,7 +354,8 @@ static void build_pairing_cmd(struct l2cap_conn *conn, static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) { - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) || (max_key_size < SMP_MIN_ENC_KEY_SIZE)) @@ -356,9 +366,61 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) return 0; } +static void smp_chan_destroy(struct l2cap_conn *conn) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + bool complete; + + BUG_ON(!smp); + + cancel_delayed_work_sync(&smp->security_timer); + /* In case the timeout freed the SMP context */ + if (!chan->data) + return; + + if (work_pending(&smp->distribute_work)) { + cancel_work_sync(&smp->distribute_work); + if (!chan->data) + return; + } + + complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); + mgmt_smp_complete(conn->hcon, complete); + + kfree(smp->csrk); + kfree(smp->slave_csrk); + + crypto_free_blkcipher(smp->tfm_aes); + + /* If pairing failed clean up any keys we might have */ + if (!complete) { + if (smp->ltk) { + list_del(&smp->ltk->list); + kfree(smp->ltk); + } + + if (smp->slave_ltk) { + list_del(&smp->slave_ltk->list); + kfree(smp->slave_ltk); + } + + if (smp->remote_irk) { + list_del(&smp->remote_irk->list); + kfree(smp->remote_irk); + } + } + + chan->data = NULL; + kfree(smp); + hci_conn_drop(conn->hcon); +} + static void smp_failure(struct l2cap_conn *conn, u8 reason) { struct hci_conn *hcon = conn->hcon; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp; if (reason) smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), @@ -368,7 +430,10 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason) mgmt_auth_failed(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type, HCI_ERROR_AUTH_FAILURE); - cancel_delayed_work_sync(&conn->security_timer); + if (!chan->data) + return; + + smp = chan->data; if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) smp_chan_destroy(conn); @@ -405,7 +470,8 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, u8 local_io, u8 remote_io) { struct hci_conn *hcon = conn->hcon; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; u8 method; 
u32 passkey = 0; int ret = 0; @@ -574,8 +640,201 @@ static u8 smp_random(struct smp_chan *smp) return 0; } +static void smp_notify_keys(struct l2cap_conn *conn) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct smp_cmd_pairing *req = (void *) &smp->preq[1]; + struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1]; + bool persistent; + + if (smp->remote_irk) { + mgmt_new_irk(hdev, smp->remote_irk); + /* Now that user space can be considered to know the + * identity address track the connection based on it + * from now on. + */ + bacpy(&hcon->dst, &smp->remote_irk->bdaddr); + hcon->dst_type = smp->remote_irk->addr_type; + l2cap_conn_update_id_addr(hcon); + + /* When receiving an identity resolving key for + * a remote device that does not use a resolvable + * private address, just remove the key so that + * it is possible to use the controller white + * list for scanning. + * + * Userspace will have been told to not store + * this key at this point. So it is safe to + * just remove it. + */ + if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) { + list_del(&smp->remote_irk->list); + kfree(smp->remote_irk); + smp->remote_irk = NULL; + } + } + + /* The LTKs and CSRKs should be persistent only if both sides + * had the bonding bit set in their authentication requests. + */ + persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING); + + if (smp->csrk) { + smp->csrk->bdaddr_type = hcon->dst_type; + bacpy(&smp->csrk->bdaddr, &hcon->dst); + mgmt_new_csrk(hdev, smp->csrk, persistent); + } + + if (smp->slave_csrk) { + smp->slave_csrk->bdaddr_type = hcon->dst_type; + bacpy(&smp->slave_csrk->bdaddr, &hcon->dst); + mgmt_new_csrk(hdev, smp->slave_csrk, persistent); + } + + if (smp->ltk) { + smp->ltk->bdaddr_type = hcon->dst_type; + bacpy(&smp->ltk->bdaddr, &hcon->dst); + mgmt_new_ltk(hdev, smp->ltk, persistent); + } + + if (smp->slave_ltk) { + smp->slave_ltk->bdaddr_type = hcon->dst_type; + bacpy(&smp->slave_ltk->bdaddr, &hcon->dst); + mgmt_new_ltk(hdev, smp->slave_ltk, persistent); + } +} + +static void smp_distribute_keys(struct work_struct *work) +{ + struct smp_chan *smp = container_of(work, struct smp_chan, + distribute_work); + struct smp_cmd_pairing *req, *rsp; + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + __u8 *keydist; + + BT_DBG("conn %p", conn); + + if (!test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) + return; + + rsp = (void *) &smp->prsp[1]; + + /* The responder sends its keys first */ + if (hcon->out && (smp->remote_key_dist & 0x07)) + return; + + req = (void *) &smp->preq[1]; + + if (hcon->out) { + keydist = &rsp->init_key_dist; + *keydist &= req->init_key_dist; + } else { + keydist = &rsp->resp_key_dist; + *keydist &= req->resp_key_dist; + } + + BT_DBG("keydist 0x%x", *keydist); + + if (*keydist & SMP_DIST_ENC_KEY) { + struct smp_cmd_encrypt_info enc; + struct smp_cmd_master_ident ident; + struct smp_ltk *ltk; + u8 authenticated; + __le16 ediv; + __le64 rand; + + get_random_bytes(enc.ltk, sizeof(enc.ltk)); + get_random_bytes(&ediv, sizeof(ediv)); + get_random_bytes(&rand, sizeof(rand)); + + smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); + + authenticated = hcon->sec_level == BT_SECURITY_HIGH; + ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, + SMP_LTK_SLAVE, authenticated, enc.ltk, + smp->enc_key_size, ediv, rand); + smp->slave_ltk = ltk; + + ident.ediv = ediv; + ident.rand = rand; +
smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); + + *keydist &= ~SMP_DIST_ENC_KEY; + } + + if (*keydist & SMP_DIST_ID_KEY) { + struct smp_cmd_ident_addr_info addrinfo; + struct smp_cmd_ident_info idinfo; + + memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk)); + + smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo); + + /* The hci_conn contains the local identity address + * after the connection has been established. + * + * This is true even when the connection has been + * established using a resolvable random address. + */ + bacpy(&addrinfo.bdaddr, &hcon->src); + addrinfo.addr_type = hcon->src_type; + + smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo), + &addrinfo); + + *keydist &= ~SMP_DIST_ID_KEY; + } + + if (*keydist & SMP_DIST_SIGN) { + struct smp_cmd_sign_info sign; + struct smp_csrk *csrk; + + /* Generate a new random key */ + get_random_bytes(sign.csrk, sizeof(sign.csrk)); + + csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); + if (csrk) { + csrk->master = 0x00; + memcpy(csrk->val, sign.csrk, sizeof(csrk->val)); + } + smp->slave_csrk = csrk; + + smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign); + + *keydist &= ~SMP_DIST_SIGN; + } + + /* If there are still keys to be received wait for them */ + if ((smp->remote_key_dist & 0x07)) + return; + + clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags); + set_bit(SMP_FLAG_COMPLETE, &smp->flags); + smp_notify_keys(conn); + + smp_chan_destroy(conn); +} + +static void smp_timeout(struct work_struct *work) +{ + struct smp_chan *smp = container_of(work, struct smp_chan, + security_timer.work); + struct l2cap_conn *conn = smp->conn; + + BT_DBG("conn %p", conn); + + l2cap_conn_shutdown(conn, ETIMEDOUT); +} + static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) { + struct l2cap_chan *chan = conn->smp; struct smp_chan *smp; smp = kzalloc(sizeof(*smp), GFP_ATOMIC); @@ -593,54 +852,20 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) } smp->conn = conn; - conn->smp_chan = smp; + chan->data = smp; + + INIT_WORK(&smp->distribute_work, smp_distribute_keys); + INIT_DELAYED_WORK(&smp->security_timer, smp_timeout); hci_conn_hold(conn->hcon); return smp; } -void smp_chan_destroy(struct l2cap_conn *conn) -{ - struct smp_chan *smp = conn->smp_chan; - bool complete; - - BUG_ON(!smp); - - complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); - mgmt_smp_complete(conn->hcon, complete); - - kfree(smp->csrk); - kfree(smp->slave_csrk); - - crypto_free_blkcipher(smp->tfm_aes); - - /* If pairing failed clean up any keys we might have */ - if (!complete) { - if (smp->ltk) { - list_del(&smp->ltk->list); - kfree(smp->ltk); - } - - if (smp->slave_ltk) { - list_del(&smp->slave_ltk->list); - kfree(smp->slave_ltk); - } - - if (smp->remote_irk) { - list_del(&smp->remote_irk->list); - kfree(smp->remote_irk); - } - } - - kfree(smp); - conn->smp_chan = NULL; - hci_conn_drop(conn->hcon); -} - int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) { struct l2cap_conn *conn = hcon->l2cap_data; + struct l2cap_chan *chan; struct smp_chan *smp; u32 value; @@ -649,7 +874,11 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) if (!conn || !test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) return -ENOTCONN; - smp = conn->smp_chan; + chan = conn->smp; + if (!chan) + return -ENOTCONN; + + smp = chan->data; switch (mgmt_op) { case MGMT_OP_USER_PASSKEY_REPLY: @@ -696,10 +925,12 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) if (conn->hcon->role != 
HCI_ROLE_SLAVE) return SMP_CMD_NOTSUPP; - if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) + if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { smp = smp_chan_create(conn); - else - smp = conn->smp_chan; + } else { + struct l2cap_chan *chan = conn->smp; + smp = chan->data; + } if (!smp) return SMP_UNSPECIFIED; @@ -753,7 +984,8 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_pairing *req, *rsp = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; u8 key_size, auth = SMP_AUTH_NONE; int ret; @@ -814,7 +1046,8 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) { - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); @@ -837,7 +1070,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) { - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; BT_DBG("conn %p", conn); @@ -1010,7 +1244,8 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_encrypt_info *rp = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; BT_DBG("conn %p", conn); @@ -1031,7 +1266,8 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_master_ident *rp = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; struct hci_dev *hdev = conn->hcon->hdev; struct hci_conn *hcon = conn->hcon; struct smp_ltk *ltk; @@ -1058,7 +1294,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) rp->ediv, rp->rand); smp->ltk = ltk; if (!(smp->remote_key_dist & SMP_DIST_ID_KEY)) - smp_distribute_keys(conn); + queue_work(hdev->workqueue, &smp->distribute_work); hci_dev_unlock(hdev); return 0; @@ -1067,7 +1303,8 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb) static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_ident_info *info = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; BT_DBG(""); @@ -1089,8 +1326,10 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_ident_addr_info *info = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; bdaddr_t rpa; BT_DBG(""); @@ -1133,7 +1372,7 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, smp->id_addr_type, smp->irk, &rpa); distribute: - smp_distribute_keys(conn); + queue_work(hdev->workqueue, &smp->distribute_work); hci_dev_unlock(hcon->hdev); @@ -1143,7 +1382,8 @@ distribute: static 
int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) { struct smp_cmd_sign_info *rp = (void *) skb->data; - struct smp_chan *smp = conn->smp_chan; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; struct hci_dev *hdev = conn->hcon->hdev; struct smp_csrk *csrk; @@ -1168,15 +1408,15 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) memcpy(csrk->val, rp->csrk, sizeof(csrk->val)); } smp->csrk = csrk; - if (!(smp->remote_key_dist & SMP_DIST_SIGN)) - smp_distribute_keys(conn); + queue_work(hdev->workqueue, &smp->distribute_work); hci_dev_unlock(hdev); return 0; } -int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) +static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb) { + struct l2cap_conn *conn = chan->conn; struct hci_conn *hcon = conn->hcon; __u8 code, reason; int err = 0; @@ -1186,10 +1426,8 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) return 0; } - if (skb->len < 1) { - kfree_skb(skb); + if (skb->len < 1) return -EILSEQ; - } if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) { err = -EOPNOTSUPP; @@ -1207,10 +1445,11 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) * returns an error). */ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ && - !conn->smp_chan) { + !test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) { BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code); - kfree_skb(skb); - return -EOPNOTSUPP; + reason = SMP_CMD_NOTSUPP; + err = -EOPNOTSUPP; + goto done; } switch (code) { @@ -1271,188 +1510,201 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) done: if (reason) smp_failure(conn, reason); - - kfree_skb(skb); + if (!err) + kfree_skb(skb); return err; } -static void smp_notify_keys(struct l2cap_conn *conn) +static void smp_teardown_cb(struct l2cap_chan *chan, int err) { - struct smp_chan *smp = conn->smp_chan; + struct l2cap_conn *conn = chan->conn; + + BT_DBG("chan %p", chan); + + if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) + smp_chan_destroy(conn); + + conn->smp = NULL; + l2cap_chan_put(chan); +} + +static void smp_resume_cb(struct l2cap_chan *chan) +{ + struct smp_chan *smp = chan->data; + struct l2cap_conn *conn = chan->conn; struct hci_conn *hcon = conn->hcon; struct hci_dev *hdev = hcon->hdev; - struct smp_cmd_pairing *req = (void *) &smp->preq[1]; - struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1]; - bool persistent; - if (smp->remote_irk) { - mgmt_new_irk(hdev, smp->remote_irk); - /* Now that user space can be considered to know the - * identity address track the connection based on it - * from now on. - */ - bacpy(&hcon->dst, &smp->remote_irk->bdaddr); - hcon->dst_type = smp->remote_irk->addr_type; - l2cap_conn_update_id_addr(hcon); + BT_DBG("chan %p", chan); - /* When receiving an indentity resolving key for - * a remote device that does not use a resolvable - * private address, just remove the key so that - * it is possible to use the controller white - * list for scanning. - * - * Userspace will have been told to not store - * this key at this point. So it is safe to - * just remove it. - */ - if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) { - list_del(&smp->remote_irk->list); - kfree(smp->remote_irk); - smp->remote_irk = NULL; - } - } + if (!smp) + return; - /* The LTKs and CSRKs should be persistent only if both sides - * had the bonding bit set in their authentication requests. 
- */ - persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING); + cancel_delayed_work(&smp->security_timer); - if (smp->csrk) { - smp->csrk->bdaddr_type = hcon->dst_type; - bacpy(&smp->csrk->bdaddr, &hcon->dst); - mgmt_new_csrk(hdev, smp->csrk, persistent); - } + if (test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) + queue_work(hdev->workqueue, &smp->distribute_work); +} - if (smp->slave_csrk) { - smp->slave_csrk->bdaddr_type = hcon->dst_type; - bacpy(&smp->slave_csrk->bdaddr, &hcon->dst); - mgmt_new_csrk(hdev, smp->slave_csrk, persistent); - } +static void smp_ready_cb(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; - if (smp->ltk) { - smp->ltk->bdaddr_type = hcon->dst_type; - bacpy(&smp->ltk->bdaddr, &hcon->dst); - mgmt_new_ltk(hdev, smp->ltk, persistent); - } + BT_DBG("chan %p", chan); - if (smp->slave_ltk) { - smp->slave_ltk->bdaddr_type = hcon->dst_type; - bacpy(&smp->slave_ltk->bdaddr, &hcon->dst); - mgmt_new_ltk(hdev, smp->slave_ltk, persistent); - } + conn->smp = chan; + l2cap_chan_hold(chan); } -int smp_distribute_keys(struct l2cap_conn *conn) +static int smp_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) { - struct smp_cmd_pairing *req, *rsp; - struct smp_chan *smp = conn->smp_chan; - struct hci_conn *hcon = conn->hcon; - struct hci_dev *hdev = hcon->hdev; - __u8 *keydist; + int err; - BT_DBG("conn %p", conn); + BT_DBG("chan %p", chan); - if (!test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) - return 0; + err = smp_sig_channel(chan, skb); + if (err) { + struct smp_chan *smp = chan->data; - rsp = (void *) &smp->prsp[1]; + if (smp) + cancel_delayed_work_sync(&smp->security_timer); - /* The responder sends its keys first */ - if (hcon->out && (smp->remote_key_dist & 0x07)) - return 0; + l2cap_conn_shutdown(chan->conn, -err); + } - req = (void *) &smp->preq[1]; + return err; +} - if (hcon->out) { - keydist = &rsp->init_key_dist; - *keydist &= req->init_key_dist; - } else { - keydist = &rsp->resp_key_dist; - *keydist &= req->resp_key_dist; - } +static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb) +{ + struct sk_buff *skb; - BT_DBG("keydist 0x%x", *keydist); + skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL); + if (!skb) + return ERR_PTR(-ENOMEM); - if (*keydist & SMP_DIST_ENC_KEY) { - struct smp_cmd_encrypt_info enc; - struct smp_cmd_master_ident ident; - struct smp_ltk *ltk; - u8 authenticated; - __le16 ediv; - __le64 rand; + skb->priority = HCI_PRIO_MAX; + bt_cb(skb)->chan = chan; - get_random_bytes(enc.ltk, sizeof(enc.ltk)); - get_random_bytes(&ediv, sizeof(ediv)); - get_random_bytes(&rand, sizeof(rand)); + return skb; +} - smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); +static const struct l2cap_ops smp_chan_ops = { + .name = "Security Manager", + .ready = smp_ready_cb, + .recv = smp_recv_cb, + .alloc_skb = smp_alloc_skb_cb, + .teardown = smp_teardown_cb, + .resume = smp_resume_cb, + + .new_connection = l2cap_chan_no_new_connection, + .state_change = l2cap_chan_no_state_change, + .close = l2cap_chan_no_close, + .defer = l2cap_chan_no_defer, + .suspend = l2cap_chan_no_suspend, + .set_shutdown = l2cap_chan_no_set_shutdown, + .get_sndtimeo = l2cap_chan_no_get_sndtimeo, + .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec, +}; - authenticated = hcon->sec_level == BT_SECURITY_HIGH; - ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, - SMP_LTK_SLAVE, authenticated, enc.ltk, - smp->enc_key_size, ediv, rand); - smp->slave_ltk = ltk; +static inline struct l2cap_chan 
*smp_new_conn_cb(struct l2cap_chan *pchan) +{ + struct l2cap_chan *chan; - ident.ediv = ediv; - ident.rand = rand; + BT_DBG("pchan %p", pchan); - smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident); + chan = l2cap_chan_create(); + if (!chan) + return NULL; - *keydist &= ~SMP_DIST_ENC_KEY; - } + chan->chan_type = pchan->chan_type; + chan->ops = &smp_chan_ops; + chan->scid = pchan->scid; + chan->dcid = chan->scid; + chan->imtu = pchan->imtu; + chan->omtu = pchan->omtu; + chan->mode = pchan->mode; - if (*keydist & SMP_DIST_ID_KEY) { - struct smp_cmd_ident_addr_info addrinfo; - struct smp_cmd_ident_info idinfo; + BT_DBG("created chan %p", chan); - memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk)); + return chan; +} - smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo); +static const struct l2cap_ops smp_root_chan_ops = { + .name = "Security Manager Root", + .new_connection = smp_new_conn_cb, + + /* None of these are implemented for the root channel */ + .close = l2cap_chan_no_close, + .alloc_skb = l2cap_chan_no_alloc_skb, + .recv = l2cap_chan_no_recv, + .state_change = l2cap_chan_no_state_change, + .teardown = l2cap_chan_no_teardown, + .ready = l2cap_chan_no_ready, + .defer = l2cap_chan_no_defer, + .suspend = l2cap_chan_no_suspend, + .resume = l2cap_chan_no_resume, + .set_shutdown = l2cap_chan_no_set_shutdown, + .get_sndtimeo = l2cap_chan_no_get_sndtimeo, + .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec, +}; - /* The hci_conn contains the local identity address - * after the connection has been established. - * - * This is true even when the connection has been - * established using a resolvable random address. - */ - bacpy(&addrinfo.bdaddr, &hcon->src); - addrinfo.addr_type = hcon->src_type; +int smp_register(struct hci_dev *hdev) +{ + struct l2cap_chan *chan; + struct crypto_blkcipher *tfm_aes; - smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo), - &addrinfo); + BT_DBG("%s", hdev->name); - *keydist &= ~SMP_DIST_ID_KEY; + tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm_aes)) { + int err = PTR_ERR(tfm_aes); + BT_ERR("Unable to create crypto context"); + return err; } - if (*keydist & SMP_DIST_SIGN) { - struct smp_cmd_sign_info sign; - struct smp_csrk *csrk; + chan = l2cap_chan_create(); + if (!chan) { + crypto_free_blkcipher(tfm_aes); + return -ENOMEM; + } - /* Generate a new random key */ - get_random_bytes(sign.csrk, sizeof(sign.csrk)); + chan->data = tfm_aes; - csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); - if (csrk) { - csrk->master = 0x00; - memcpy(csrk->val, sign.csrk, sizeof(csrk->val)); - } - smp->slave_csrk = csrk; + l2cap_add_scid(chan, L2CAP_CID_SMP); - smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign); + l2cap_chan_set_defaults(chan); - *keydist &= ~SMP_DIST_SIGN; - } + bacpy(&chan->src, &hdev->bdaddr); + chan->src_type = BDADDR_LE_PUBLIC; + chan->state = BT_LISTEN; + chan->mode = L2CAP_MODE_BASIC; + chan->imtu = L2CAP_DEFAULT_MTU; + chan->ops = &smp_root_chan_ops; - /* If there are still keys to be received wait for them */ - if ((smp->remote_key_dist & 0x07)) - return 0; + hdev->smp_data = chan; - clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags); - cancel_delayed_work_sync(&conn->security_timer); - set_bit(SMP_FLAG_COMPLETE, &smp->flags); - smp_notify_keys(conn); + return 0; +} - smp_chan_destroy(conn); +void smp_unregister(struct hci_dev *hdev) +{ + struct l2cap_chan *chan = hdev->smp_data; + struct crypto_blkcipher *tfm_aes; - return 0; + if (!chan) + return; + + BT_DBG("%s chan %p", hdev->name, 
chan); + + tfm_aes = chan->data; + if (tfm_aes) { + chan->data = NULL; + crypto_free_blkcipher(tfm_aes); + } + + hdev->smp_data = NULL; + l2cap_chan_put(chan); } diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h index 796f4f45f92f..cf1094617c69 100644 --- a/net/bluetooth/smp.h +++ b/net/bluetooth/smp.h @@ -126,14 +126,12 @@ enum { /* SMP Commands */ bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level); int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); -int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); -int smp_distribute_keys(struct l2cap_conn *conn); int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); -void smp_chan_destroy(struct l2cap_conn *conn); +bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr); +int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa); -bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16], - bdaddr_t *bdaddr); -int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa); +int smp_register(struct hci_dev *hdev); +void smp_unregister(struct hci_dev *hdev); #endif /* __SMP_H */ diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 078d336a1f37..a9f54a9b6690 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -252,12 +252,12 @@ static void del_nbp(struct net_bridge_port *p) br_fdb_delete_by_port(br, p, 1); nbp_update_port_count(br); + netdev_upper_dev_unlink(dev, br->dev); + dev->priv_flags &= ~IFF_BRIDGE_PORT; netdev_rx_handler_unregister(dev); - netdev_upper_dev_unlink(dev, br->dev); - br_multicast_del_port(p); kobject_uevent(&p->kobj, KOBJ_REMOVE); @@ -476,16 +476,16 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (err) goto err3; - err = netdev_master_upper_dev_link(dev, br->dev); + err = netdev_rx_handler_register(dev, br_handle_frame, p); if (err) goto err4; - err = netdev_rx_handler_register(dev, br_handle_frame, p); + dev->priv_flags |= IFF_BRIDGE_PORT; + + err = netdev_master_upper_dev_link(dev, br->dev); if (err) goto err5; - dev->priv_flags |= IFF_BRIDGE_PORT; - dev_disable_lro(dev); list_add_rcu(&p->list, &br->port_list); @@ -520,7 +520,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) return 0; err5: - netdev_upper_dev_unlink(dev, br->dev); + dev->priv_flags &= ~IFF_BRIDGE_PORT; + netdev_rx_handler_unregister(dev); err4: br_netpoll_disable(p); err3: diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 7751c92c8c57..648d79ccf462 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1822,7 +1822,7 @@ static void br_multicast_query_expired(struct net_bridge *br, if (query->startup_sent < br->multicast_startup_query_count) query->startup_sent++; - rcu_assign_pointer(querier, NULL); + RCU_INIT_POINTER(querier, NULL); br_multicast_send_query(br, NULL, query); spin_unlock(&br->multicast_lock); } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index cb5fcf62f663..90a91e137acc 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -257,9 +257,6 @@ static int br_afspec(struct net_bridge *br, } else err = br_vlan_add(br, vinfo->vid, vinfo->flags); - if (err) - break; - break; case RTM_DELLINK: @@ -276,7 +273,7 @@ static int br_afspec(struct net_bridge *br, return err; } -static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = { +static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, [IFLA_BRPORT_COST] = { .type = NLA_U32 }, 
[IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, @@ -382,7 +379,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh) if (p && protinfo) { if (protinfo->nla_type & NLA_F_NESTED) { err = nla_parse_nested(tb, IFLA_BRPORT_MAX, - protinfo, ifla_brport_policy); + protinfo, br_port_policy); if (err) return err; @@ -461,6 +458,88 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev, return register_netdevice(dev); } +static int br_port_slave_changelink(struct net_device *brdev, + struct net_device *dev, + struct nlattr *tb[], + struct nlattr *data[]) +{ + if (!data) + return 0; + return br_setport(br_port_get_rtnl(dev), data); +} + +static int br_port_fill_slave_info(struct sk_buff *skb, + const struct net_device *brdev, + const struct net_device *dev) +{ + return br_port_fill_attrs(skb, br_port_get_rtnl(dev)); +} + +static size_t br_port_get_slave_size(const struct net_device *brdev, + const struct net_device *dev) +{ + return br_port_info_size(); +} + +static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = { + [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 }, + [IFLA_BR_HELLO_TIME] = { .type = NLA_U32 }, + [IFLA_BR_MAX_AGE] = { .type = NLA_U32 }, +}; + +static int br_changelink(struct net_device *brdev, struct nlattr *tb[], + struct nlattr *data[]) +{ + struct net_bridge *br = netdev_priv(brdev); + int err; + + if (!data) + return 0; + + if (data[IFLA_BR_FORWARD_DELAY]) { + err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY])); + if (err) + return err; + } + + if (data[IFLA_BR_HELLO_TIME]) { + err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME])); + if (err) + return err; + } + + if (data[IFLA_BR_MAX_AGE]) { + err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE])); + if (err) + return err; + } + + return 0; +} + +static size_t br_get_size(const struct net_device *brdev) +{ + return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */ + nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */ + 0; +} + +static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) +{ + struct net_bridge *br = netdev_priv(brdev); + u32 forward_delay = jiffies_to_clock_t(br->forward_delay); + u32 hello_time = jiffies_to_clock_t(br->hello_time); + u32 age_time = jiffies_to_clock_t(br->max_age); + + if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) || + nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) || + nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time)) + return -EMSGSIZE; + + return 0; +} + static size_t br_get_link_af_size(const struct net_device *dev) { struct net_port_vlans *pv; @@ -485,12 +564,23 @@ static struct rtnl_af_ops br_af_ops = { }; struct rtnl_link_ops br_link_ops __read_mostly = { - .kind = "bridge", - .priv_size = sizeof(struct net_bridge), - .setup = br_dev_setup, - .validate = br_validate, - .newlink = br_dev_newlink, - .dellink = br_dev_delete, + .kind = "bridge", + .priv_size = sizeof(struct net_bridge), + .setup = br_dev_setup, + .maxtype = IFLA_BRPORT_MAX, + .policy = br_policy, + .validate = br_validate, + .newlink = br_dev_newlink, + .changelink = br_changelink, + .dellink = br_dev_delete, + .get_size = br_get_size, + .fill_info = br_fill_info, + + .slave_maxtype = IFLA_BRPORT_MAX, + .slave_policy = br_port_policy, + .slave_changelink = br_port_slave_changelink, + .get_slave_size = br_port_get_slave_size, + .fill_slave_info = br_port_fill_slave_info, }; int __init br_netlink_init(void) diff --git a/net/bridge/netfilter/ebtables.c 
b/net/bridge/netfilter/ebtables.c index 6d69631b9f4d..d9a8c05d995d 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -26,6 +26,7 @@ #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/cpumask.h> +#include <linux/audit.h> #include <net/sock.h> /* needed for logical [in,out]-dev filtering */ #include "../br_private.h" @@ -1058,6 +1059,20 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, vfree(table); vfree(counterstmp); + +#ifdef CONFIG_AUDIT + if (audit_enabled) { + struct audit_buffer *ab; + + ab = audit_log_start(current->audit_context, GFP_KERNEL, + AUDIT_NETFILTER_CFG); + if (ab) { + audit_log_format(ab, "table=%s family=%u entries=%u", + repl->name, AF_BRIDGE, repl->nentries); + audit_log_end(ab); + } + } +#endif return ret; free_unlock: diff --git a/net/core/dev.c b/net/core/dev.c index cf8a95f48cff..db0388607329 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -897,23 +897,25 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) EXPORT_SYMBOL(dev_getfirstbyhwtype); /** - * dev_get_by_flags_rcu - find any device with given flags + * __dev_get_by_flags - find any device with given flags * @net: the applicable net namespace * @if_flags: IFF_* values * @mask: bitmask of bits in if_flags to check * * Search for any interface with the given flags. Returns NULL if a device * is not found or a pointer to the device. Must be called inside - * rcu_read_lock(), and result refcount is unchanged. + * rtnl_lock(), and result refcount is unchanged. */ -struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags, - unsigned short mask) +struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, + unsigned short mask) { struct net_device *dev, *ret; + ASSERT_RTNL(); + ret = NULL; - for_each_netdev_rcu(net, dev) { + for_each_netdev(net, dev) { if (((dev->flags ^ if_flags) & mask) == 0) { ret = dev; break; @@ -921,7 +923,7 @@ struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags } return ret; } -EXPORT_SYMBOL(dev_get_by_flags_rcu); +EXPORT_SYMBOL(__dev_get_by_flags); /** * dev_valid_name - check if name is okay for network device @@ -2177,6 +2179,53 @@ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) return (struct dev_kfree_skb_cb *)skb->cb; } +void netif_schedule_queue(struct netdev_queue *txq) +{ + rcu_read_lock(); + if (!(txq->state & QUEUE_STATE_ANY_XOFF)) { + struct Qdisc *q = rcu_dereference(txq->qdisc); + + __netif_schedule(q); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(netif_schedule_queue); + +/** + * netif_wake_subqueue - allow sending packets on subqueue + * @dev: network device + * @queue_index: sub queue index + * + * Resume individual transmit queue of a device with multiple transmit queues. 
+ */ +void netif_wake_subqueue(struct net_device *dev, u16 queue_index) +{ + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); + + if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) { + struct Qdisc *q; + + rcu_read_lock(); + q = rcu_dereference(txq->qdisc); + __netif_schedule(q); + rcu_read_unlock(); + } +} +EXPORT_SYMBOL(netif_wake_subqueue); + +void netif_tx_wake_queue(struct netdev_queue *dev_queue) +{ + if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { + struct Qdisc *q; + + rcu_read_lock(); + q = rcu_dereference(dev_queue->qdisc); + __netif_schedule(q); + rcu_read_unlock(); + } +} +EXPORT_SYMBOL(netif_tx_wake_queue); + void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) { unsigned long flags; @@ -2485,52 +2534,6 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) return 0; } -struct dev_gso_cb { - void (*destructor)(struct sk_buff *skb); -}; - -#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) - -static void dev_gso_skb_destructor(struct sk_buff *skb) -{ - struct dev_gso_cb *cb; - - kfree_skb_list(skb->next); - skb->next = NULL; - - cb = DEV_GSO_CB(skb); - if (cb->destructor) - cb->destructor(skb); -} - -/** - * dev_gso_segment - Perform emulated hardware segmentation on skb. - * @skb: buffer to segment - * @features: device features as applicable to this skb - * - * This function segments the given skb and stores the list of segments - * in skb->next. - */ -static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) -{ - struct sk_buff *segs; - - segs = skb_gso_segment(skb, features); - - /* Verifying header integrity only. */ - if (!segs) - return 0; - - if (IS_ERR(segs)) - return PTR_ERR(segs); - - skb->next = segs; - DEV_GSO_CB(skb)->destructor = skb->destructor; - skb->destructor = dev_gso_skb_destructor; - - return 0; -} - /* If MPLS offload request, verify we are testing hardware MPLS features * instead of standard features for the netdev. 
*/ @@ -2605,119 +2608,127 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) } EXPORT_SYMBOL(netif_skb_features); -int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, - struct netdev_queue *txq) +static int xmit_one(struct sk_buff *skb, struct net_device *dev, + struct netdev_queue *txq, bool more) { - const struct net_device_ops *ops = dev->netdev_ops; - int rc = NETDEV_TX_OK; - unsigned int skb_len; + unsigned int len; + int rc; - if (likely(!skb->next)) { - netdev_features_t features; + if (!list_empty(&ptype_all)) + dev_queue_xmit_nit(skb, dev); - /* - * If device doesn't need skb->dst, release it right now while - * its hot in this cpu cache - */ - if (dev->priv_flags & IFF_XMIT_DST_RELEASE) - skb_dst_drop(skb); + len = skb->len; + trace_net_dev_start_xmit(skb, dev); + rc = netdev_start_xmit(skb, dev, txq, more); + trace_net_dev_xmit(skb, rc, dev, len); - features = netif_skb_features(skb); - - if (vlan_tx_tag_present(skb) && - !vlan_hw_offload_capable(features, skb->vlan_proto)) { - skb = __vlan_put_tag(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); - if (unlikely(!skb)) - goto out; + return rc; +} - skb->vlan_tci = 0; - } +struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev, + struct netdev_queue *txq, int *ret) +{ + struct sk_buff *skb = first; + int rc = NETDEV_TX_OK; - /* If encapsulation offload request, verify we are testing - * hardware encapsulation features instead of standard - * features for the netdev - */ - if (skb->encapsulation) - features &= dev->hw_enc_features; + while (skb) { + struct sk_buff *next = skb->next; - if (netif_needs_gso(skb, features)) { - if (unlikely(dev_gso_segment(skb, features))) - goto out_kfree_skb; - if (skb->next) - goto gso; - } else { - if (skb_needs_linearize(skb, features) && - __skb_linearize(skb)) - goto out_kfree_skb; + skb->next = NULL; + rc = xmit_one(skb, dev, txq, next != NULL); + if (unlikely(!dev_xmit_complete(rc))) { + skb->next = next; + goto out; + } - /* If packet is not checksummed and device does not - * support checksumming for this protocol, complete - * checksumming here. 
- */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (skb->encapsulation) - skb_set_inner_transport_header(skb, - skb_checksum_start_offset(skb)); - else - skb_set_transport_header(skb, - skb_checksum_start_offset(skb)); - if (!(features & NETIF_F_ALL_CSUM) && - skb_checksum_help(skb)) - goto out_kfree_skb; - } + + skb = next; + if (netif_xmit_stopped(txq) && skb) { + rc = NETDEV_TX_BUSY; + break; } + } - if (!list_empty(&ptype_all)) - dev_queue_xmit_nit(skb, dev); +out: + *ret = rc; + return skb; +} - skb_len = skb->len; - trace_net_dev_start_xmit(skb, dev); - rc = ops->ndo_start_xmit(skb, dev); - trace_net_dev_xmit(skb, rc, dev, skb_len); - if (rc == NETDEV_TX_OK) - txq_trans_update(txq); - return rc; +struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features) +{ + if (vlan_tx_tag_present(skb) && + !vlan_hw_offload_capable(features, skb->vlan_proto)) { + skb = __vlan_put_tag(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); + if (skb) + skb->vlan_tci = 0; } + return skb; +} -gso: - do { - struct sk_buff *nskb = skb->next; +struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) +{ + netdev_features_t features; - skb->next = nskb->next; - nskb->next = NULL; + if (skb->next) + return skb; - if (!list_empty(&ptype_all)) - dev_queue_xmit_nit(nskb, dev); - - skb_len = nskb->len; - trace_net_dev_start_xmit(nskb, dev); - rc = ops->ndo_start_xmit(nskb, dev); - trace_net_dev_xmit(nskb, rc, dev, skb_len); - if (unlikely(rc != NETDEV_TX_OK)) { - if (rc & ~NETDEV_TX_MASK) - goto out_kfree_gso_skb; - nskb->next = skb->next; - skb->next = nskb; - return rc; + /* If device doesn't need skb->dst, release it right now while + * it's hot in this cpu cache + */ + if (dev->priv_flags & IFF_XMIT_DST_RELEASE) + skb_dst_drop(skb); + + features = netif_skb_features(skb); + skb = validate_xmit_vlan(skb, features); + if (unlikely(!skb)) + goto out_null; + + /* If encapsulation offload request, verify we are testing + * hardware encapsulation features instead of standard + * features for the netdev + */ + if (skb->encapsulation) + features &= dev->hw_enc_features; + + if (netif_needs_gso(skb, features)) { + struct sk_buff *segs; + + segs = skb_gso_segment(skb, features); + if (IS_ERR(segs)) { + segs = NULL; + } else if (segs) { + consume_skb(skb); + skb = segs; } - txq_trans_update(txq); - if (unlikely(netif_xmit_stopped(txq) && skb->next)) - return NETDEV_TX_BUSY; - } while (skb->next); + } else { + if (skb_needs_linearize(skb, features) && + __skb_linearize(skb)) + goto out_kfree_skb; -out_kfree_gso_skb: - if (likely(skb->next == NULL)) { - skb->destructor = DEV_GSO_CB(skb)->destructor; - consume_skb(skb); - return rc; + /* If packet is not checksummed and device does not + * support checksumming for this protocol, complete + * checksumming here.
+ */ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->encapsulation) + skb_set_inner_transport_header(skb, + skb_checksum_start_offset(skb)); + else + skb_set_transport_header(skb, + skb_checksum_start_offset(skb)); + if (!(features & NETIF_F_ALL_CSUM) && + skb_checksum_help(skb)) + goto out_kfree_skb; + } } + + return skb; + out_kfree_skb: kfree_skb(skb); -out: - return rc; +out_null: + return NULL; } -EXPORT_SYMBOL_GPL(dev_hard_start_xmit); static void qdisc_pkt_len_init(struct sk_buff *skb) { @@ -2785,7 +2796,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_bstats_update(q, skb); - if (sch_direct_xmit(skb, q, dev, txq, root_lock)) { + skb = validate_xmit_skb(skb, dev); + if (skb && sch_direct_xmit(skb, q, dev, txq, root_lock)) { if (unlikely(contended)) { spin_unlock(&q->busylock); contended = false; @@ -2925,11 +2937,15 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) goto recursion_alert; + skb = validate_xmit_skb(skb, dev); + if (!skb) + goto drop; + HARD_TX_LOCK(dev, txq, cpu); if (!netif_xmit_stopped(txq)) { __this_cpu_inc(xmit_recursion); - rc = dev_hard_start_xmit(skb, dev, txq); + skb = dev_hard_start_xmit(skb, dev, txq, &rc); __this_cpu_dec(xmit_recursion); if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); @@ -2950,10 +2966,11 @@ recursion_alert: } rc = -ENETDOWN; +drop: rcu_read_unlock_bh(); atomic_long_inc(&dev->tx_dropped); - kfree_skb(skb); + kfree_skb_list(skb); return rc; out: rcu_read_unlock_bh(); @@ -3130,8 +3147,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, } if (map) { - tcpu = map->cpus[((u64) hash * map->len) >> 32]; - + tcpu = map->cpus[reciprocal_scale(hash, map->len)]; if (cpu_online(tcpu)) { cpu = tcpu; goto done; @@ -3467,7 +3483,7 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl); skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS); - q = rxq->qdisc; + q = rcu_dereference(rxq->qdisc); if (q != &noop_qdisc) { spin_lock(qdisc_lock(q)); if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) @@ -3484,7 +3500,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb, { struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); - if (!rxq || rxq->qdisc == &noop_qdisc) + if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc) goto out; if (*pt_prev) { @@ -3965,11 +3981,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (!(skb->dev->features & NETIF_F_GRO)) goto normal; - if (skb_is_gso(skb) || skb_has_frag_list(skb)) + if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad) goto normal; gro_list_prepare(napi, skb); - NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */ rcu_read_lock(); list_for_each_entry_rcu(ptype, head, list) { @@ -3983,6 +3998,22 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->free = 0; NAPI_GRO_CB(skb)->udp_mark = 0; + /* Setup for GRO checksum validation */ + switch (skb->ip_summed) { + case CHECKSUM_COMPLETE: + NAPI_GRO_CB(skb)->csum = skb->csum; + NAPI_GRO_CB(skb)->csum_valid = 1; + NAPI_GRO_CB(skb)->csum_cnt = 0; + break; + case CHECKSUM_UNNECESSARY: + NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1; + NAPI_GRO_CB(skb)->csum_valid = 0; + break; + default: + NAPI_GRO_CB(skb)->csum_cnt = 0; + NAPI_GRO_CB(skb)->csum_valid = 0; + } + pp = ptype->callbacks.gro_receive(&napi->gro_list, skb); 
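dev_gro_receive() now seeds two pieces of per-packet state: csum_valid marks a usable CHECKSUM_COMPLETE value, while csum_cnt counts how many header levels a CHECKSUM_UNNECESSARY device has vouched for. Roughly what the series' validation helpers do with that state, simplified here for illustration, with the pseudo-header computation left to the caller:

static bool example_gro_csum_ok(struct sk_buff *skb, __wsum pseudo)
{
	/* The NIC vouched for this header level: consume one credit. */
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		NAPI_GRO_CB(skb)->csum_cnt--;
		return true;
	}

	/* CHECKSUM_COMPLETE: fold the stored sum against the
	 * pseudo-header instead of walking the payload again.
	 */
	if (NAPI_GRO_CB(skb)->csum_valid)
		return !csum_fold(csum_add(pseudo, NAPI_GRO_CB(skb)->csum));

	/* No hardware help: compute over the GRO region. */
	NAPI_GRO_CB(skb)->csum = pseudo;
	return !__skb_gro_checksum_complete(skb);
}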
break; } @@ -4212,6 +4243,31 @@ gro_result_t napi_gro_frags(struct napi_struct *napi) } EXPORT_SYMBOL(napi_gro_frags); +/* Compute the checksum from gro_offset and return the folded value + * after adding in any pseudo checksum. + */ +__sum16 __skb_gro_checksum_complete(struct sk_buff *skb) +{ + __wsum wsum; + __sum16 sum; + + wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0); + + /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */ + sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum)); + if (likely(!sum)) { + if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && + !skb->csum_complete_sw) + netdev_rx_csum_fault(skb->dev); + } + + NAPI_GRO_CB(skb)->csum = wsum; + NAPI_GRO_CB(skb)->csum_valid = 1; + + return sum; +} +EXPORT_SYMBOL(__skb_gro_checksum_complete); + /* * net_rps_action_and_irq_enable sends any pending IPI's for rps. * Note: called with local irq disabled, but exits with local irq enabled. diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index cf999e09bcd2..72e899a3efda 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -365,11 +365,8 @@ void dev_load(struct net *net, const char *name) no_module = !dev; if (no_module && capable(CAP_NET_ADMIN)) no_module = request_module("netdev-%s", name); - if (no_module && capable(CAP_SYS_MODULE)) { - if (!request_module("%s", name)) - pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n", - name); - } + if (no_module && capable(CAP_SYS_MODULE)) + request_module("%s", name); } EXPORT_SYMBOL(dev_load); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 17cb912793fa..27e61b886520 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1621,6 +1621,80 @@ static int ethtool_get_module_eeprom(struct net_device *dev, modinfo.eeprom_len); } +static int ethtool_tunable_valid(const struct ethtool_tunable *tuna) +{ + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + if (tuna->len != sizeof(u32) || + tuna->type_id != ETHTOOL_TUNABLE_U32) + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr) +{ + int ret; + struct ethtool_tunable tuna; + const struct ethtool_ops *ops = dev->ethtool_ops; + void *data; + + if (!ops->get_tunable) + return -EOPNOTSUPP; + if (copy_from_user(&tuna, useraddr, sizeof(tuna))) + return -EFAULT; + ret = ethtool_tunable_valid(&tuna); + if (ret) + return ret; + data = kmalloc(tuna.len, GFP_USER); + if (!data) + return -ENOMEM; + ret = ops->get_tunable(dev, &tuna, data); + if (ret) + goto out; + useraddr += sizeof(tuna); + ret = -EFAULT; + if (copy_to_user(useraddr, data, tuna.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr) +{ + int ret; + struct ethtool_tunable tuna; + const struct ethtool_ops *ops = dev->ethtool_ops; + void *data; + + if (!ops->set_tunable) + return -EOPNOTSUPP; + if (copy_from_user(&tuna, useraddr, sizeof(tuna))) + return -EFAULT; + ret = ethtool_tunable_valid(&tuna); + if (ret) + return ret; + data = kmalloc(tuna.len, GFP_USER); + if (!data) + return -ENOMEM; + useraddr += sizeof(tuna); + ret = -EFAULT; + if (copy_from_user(data, useraddr, tuna.len)) + goto out; + ret = ops->set_tunable(dev, &tuna, data); + +out: + kfree(data); + return ret; +} + /* The main entry point in this file. 
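The GTUNABLE/STUNABLE pair is driven from user space through the usual SIOCETHTOOL ioctl. An illustrative request for the one tunable validated above; the value is laid out immediately after struct ethtool_tunable, matching the useraddr + sizeof(tuna) arithmetic in the handlers (error handling elided):

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_rx_copybreak(int fd, const char *ifname, __u32 value)
{
	struct {
		struct ethtool_tunable tuna;
		__u32 value;
	} req = {
		.tuna = {
			.cmd	 = ETHTOOL_STUNABLE,
			.id	 = ETHTOOL_RX_COPYBREAK,
			.type_id = ETHTOOL_TUNABLE_U32,
			.len	 = sizeof(__u32),
		},
		.value = value,
	};
	struct ifreq ifr = { 0 };

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&req;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}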
Called from net/core/dev_ioctl.c */ int dev_ethtool(struct net *net, struct ifreq *ifr) @@ -1670,6 +1744,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GCHANNELS: case ETHTOOL_GET_TS_INFO: case ETHTOOL_GEEE: + case ETHTOOL_GTUNABLE: break; default: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) @@ -1857,6 +1932,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GMODULEEEPROM: rc = ethtool_get_module_eeprom(dev, useraddr); break; + case ETHTOOL_GTUNABLE: + rc = ethtool_get_tunable(dev, useraddr); + break; + case ETHTOOL_STUNABLE: + rc = ethtool_set_tunable(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } diff --git a/net/core/filter.c b/net/core/filter.c index d814b8a89d0f..fcd3f6742a6a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -87,33 +87,9 @@ int sk_filter(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(sk_filter); -/* Helper to find the offset of pkt_type in sk_buff structure. We want - * to make sure its still a 3bit field starting at a byte boundary; - * taken from arch/x86/net/bpf_jit_comp.c. - */ -#ifdef __BIG_ENDIAN_BITFIELD -#define PKT_TYPE_MAX (7 << 5) -#else -#define PKT_TYPE_MAX 7 -#endif -static unsigned int pkt_type_offset(void) -{ - struct sk_buff skb_probe = { .pkt_type = ~0, }; - u8 *ct = (u8 *) &skb_probe; - unsigned int off; - - for (off = 0; off < sizeof(struct sk_buff); off++) { - if (ct[off] == PKT_TYPE_MAX) - return off; - } - - pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__); - return -1; -} - static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { - return __skb_get_poff((struct sk_buff *)(unsigned long) ctx); + return skb_get_poff((struct sk_buff *)(unsigned long) ctx); } static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) @@ -190,11 +166,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp, break; case SKF_AD_OFF + SKF_AD_PKTTYPE: - *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX, - pkt_type_offset()); - if (insn->off < 0) - return false; - insn++; + *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX, + PKT_TYPE_OFFSET()); *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX); #ifdef __BIG_ENDIAN_BITFIELD insn++; @@ -933,7 +906,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) /* Expand fp for appending the new filter representation. */ old_fp = fp; - fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL); + fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0); if (!fp) { /* The old_fp is still around in case we couldn't * allocate new memory, so uncharge on that one. 
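For context, the allocation paths changed above run whenever a classic BPF socket filter is installed; a minimal user-space example that lands in sk_attach_filter() (accept only ARP frames, drop everything else; standard cBPF, shown purely for illustration):

#include <linux/filter.h>
#include <linux/if_ether.h>
#include <sys/socket.h>

static int attach_arp_only_filter(int sock)
{
	struct sock_filter code[] = {
		/* Load the EtherType halfword (offset 12 on Ethernet). */
		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
		/* ARP falls through to accept; anything else jumps to drop. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept packet */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop packet */
	};
	struct sock_fprog prog = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};

	return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}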
@@ -972,7 +945,7 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp) int err; fp->bpf_func = NULL; - fp->jited = 0; + fp->jited = false; err = bpf_check_classic(fp->insns, fp->len); if (err) { @@ -1013,7 +986,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog) if (fprog->filter == NULL) return -EINVAL; - fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL); + fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!fp) return -ENOMEM; @@ -1069,12 +1042,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) if (fprog->filter == NULL) return -EINVAL; - prog = kmalloc(bpf_fsize, GFP_KERNEL); + prog = bpf_prog_alloc(bpf_fsize, 0); if (!prog) return -ENOMEM; if (copy_from_user(prog->insns, fprog->filter, fsize)) { - kfree(prog); + __bpf_prog_free(prog); return -EFAULT; } @@ -1082,7 +1055,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) err = bpf_prog_store_orig_filter(prog, fprog); if (err) { - kfree(prog); + __bpf_prog_free(prog); return -ENOMEM; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 5f362c1d0332..8560dea58803 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -13,6 +13,7 @@ #include <linux/if_pppox.h> #include <linux/ppp_defs.h> #include <net/flow_keys.h> +#include <scsi/fc/fc_fcoe.h> /* copy saddr & daddr, possibly using 64bit load/store * Equivalent to : flow->src = iph->saddr; @@ -26,36 +27,61 @@ static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *i } /** - * skb_flow_get_ports - extract the upper layer ports and return them - * @skb: buffer to extract the ports from + * __skb_flow_get_ports - extract the upper layer ports and return them + * @skb: sk_buff to extract the ports from * @thoff: transport header offset * @ip_proto: protocol for which to get port offset + * @data: raw buffer pointer to the packet, if NULL use skb->data + * @hlen: packet header length, if @data is NULL use skb_headlen(skb) * * The function will try to retrieve the ports at offset thoff + poff where poff * is the protocol port offset returned from proto_ports_offset */ -__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto) +__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + void *data, int hlen) { int poff = proto_ports_offset(ip_proto); + if (!data) { + data = skb->data; + hlen = skb_headlen(skb); + } + if (poff >= 0) { __be32 *ports, _ports; - ports = skb_header_pointer(skb, thoff + poff, - sizeof(_ports), &_ports); + ports = __skb_header_pointer(skb, thoff + poff, + sizeof(_ports), data, hlen, &_ports); if (ports) return *ports; } return 0; } -EXPORT_SYMBOL(skb_flow_get_ports); +EXPORT_SYMBOL(__skb_flow_get_ports); -bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) +/** + * __skb_flow_dissect - extract the flow_keys struct and return it + * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified + * @data: raw buffer pointer to the packet, if NULL use skb->data + * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol + * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb) + * @hlen: packet header length, if @data is NULL use skb_headlen(skb) + * + * The function will try to retrieve the struct flow_keys from either the skbuff + * or a raw buffer specified by the rest parameters + */ +bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, + void *data, __be16 proto, int nhoff, int hlen) 
{ - int nhoff = skb_network_offset(skb); u8 ip_proto; - __be16 proto = skb->protocol; + + if (!data) { + data = skb->data; + proto = skb->protocol; + nhoff = skb_network_offset(skb); + hlen = skb_headlen(skb); + } memset(flow, 0, sizeof(*flow)); @@ -65,7 +91,7 @@ again: const struct iphdr *iph; struct iphdr _iph; ip: - iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); + iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); if (!iph || iph->ihl < 5) return false; nhoff += iph->ihl * 4; @@ -83,7 +109,7 @@ ip: __be32 flow_label; ipv6: - iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); + iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); if (!iph) return false; @@ -92,6 +118,13 @@ ipv6: flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr); nhoff += sizeof(struct ipv6hdr); + /* skip the flow label processing if skb is NULL. The + * assumption here is that if there is no skb we are not + * looking for flow info as much as we are length. + */ + if (!skb) + break; + flow_label = ip6_flowlabel(iph); if (flow_label) { /* Awesome, IPv6 packet has a flow label so we can @@ -113,7 +146,7 @@ ipv6: const struct vlan_hdr *vlan; struct vlan_hdr _vlan; - vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan); + vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan); if (!vlan) return false; @@ -126,7 +159,7 @@ ipv6: struct pppoe_hdr hdr; __be16 proto; } *hdr, _hdr; - hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr); + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); if (!hdr) return false; proto = hdr->proto; @@ -140,6 +173,9 @@ ipv6: return false; } } + case htons(ETH_P_FCOE): + flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN); + /* fall through */ default: return false; } @@ -151,7 +187,7 @@ ipv6: __be16 proto; } *hdr, _hdr; - hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr); + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); if (!hdr) return false; /* @@ -171,8 +207,9 @@ ipv6: const struct ethhdr *eth; struct ethhdr _eth; - eth = skb_header_pointer(skb, nhoff, - sizeof(_eth), &_eth); + eth = __skb_header_pointer(skb, nhoff, + sizeof(_eth), + data, hlen, &_eth); if (!eth) return false; proto = eth->h_proto; @@ -194,12 +231,12 @@ ipv6: flow->n_proto = proto; flow->ip_proto = ip_proto; - flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto); + flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen); flow->thoff = (u16) nhoff; return true; } -EXPORT_SYMBOL(skb_flow_dissect); +EXPORT_SYMBOL(__skb_flow_dissect); static u32 hashrnd __read_mostly; static __always_inline void __flow_hash_secret_init(void) @@ -286,30 +323,22 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, qcount = dev->tc_to_txq[tc].count; } - return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset; + return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; } EXPORT_SYMBOL(__skb_tx_hash); -/* __skb_get_poff() returns the offset to the payload as far as it could - * be dissected. The main user is currently BPF, so that we can dynamically - * truncate packets without needing to push actual payload to the user - * space and can analyze headers only, instead. 
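With the data/proto/nhoff/hlen plumbing in place, the dissector no longer needs a backing skb at all. A kernel-side sketch of dissecting an Ethernet frame sitting in a plain buffer, per the NULL-skb convention documented above (untagged frame assumed):

static bool dissect_raw_frame(void *buf, unsigned int len,
			      struct flow_keys *keys)
{
	const struct ethhdr *eth = buf;

	if (len < ETH_HLEN)
		return false;

	/* NULL skb: proto, nhoff and hlen must describe the buffer. */
	return __skb_flow_dissect(NULL, keys, buf, eth->h_proto,
				  ETH_HLEN, len);
}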
- */ -u32 __skb_get_poff(const struct sk_buff *skb) +u32 __skb_get_poff(const struct sk_buff *skb, void *data, + const struct flow_keys *keys, int hlen) { - struct flow_keys keys; - u32 poff = 0; - - if (!skb_flow_dissect(skb, &keys)) - return 0; + u32 poff = keys->thoff; - poff += keys.thoff; - switch (keys.ip_proto) { + switch (keys->ip_proto) { case IPPROTO_TCP: { const struct tcphdr *tcph; struct tcphdr _tcph; - tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph); + tcph = __skb_header_pointer(skb, poff, sizeof(_tcph), + data, hlen, &_tcph); if (!tcph) return poff; @@ -343,6 +372,21 @@ u32 __skb_get_poff(const struct sk_buff *skb) return poff; } +/* skb_get_poff() returns the offset to the payload as far as it could + * be dissected. The main user is currently BPF, so that we can dynamically + * truncate packets without needing to push actual payload to the user + * space and can analyze headers only, instead. + */ +u32 skb_get_poff(const struct sk_buff *skb) +{ + struct flow_keys keys; + + if (!skb_flow_dissect(skb, &keys)) + return 0; + + return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb)); +} + static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_XPS @@ -359,9 +403,8 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) if (map->len == 1) queue_index = map->queues[0]; else - queue_index = map->queues[ - ((u64)skb_get_hash(skb) * map->len) >> 32]; - + queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), + map->len)]; if (unlikely(queue_index >= dev->real_num_tx_queues)) queue_index = -1; } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 7c6b51a58968..7f155175bba8 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -224,7 +224,7 @@ static void net_free(struct net *net) return; } #endif - kfree(net->gen); + kfree(rcu_access_pointer(net->gen)); kmem_cache_free(net_cachep, net); } diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 907fb5e36c02..e6645b4f330a 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -72,7 +72,6 @@ module_param(carrier_timeout, uint, 0644); static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) { - const struct net_device_ops *ops = dev->netdev_ops; int status = NETDEV_TX_OK; netdev_features_t features; @@ -92,9 +91,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, skb->vlan_tci = 0; } - status = ops->ndo_start_xmit(skb, dev); - if (status == NETDEV_TX_OK) - txq_trans_update(txq); + status = netdev_start_xmit(skb, dev, txq, false); out: return status; @@ -116,7 +113,7 @@ static void queue_process(struct work_struct *work) continue; } - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + txq = skb_get_tx_queue(dev, skb); local_irq_save(flags); HARD_TX_LOCK(dev, txq, smp_processor_id()); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 8b849ddfef2e..5c728aaf8d6c 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -202,6 +202,7 @@ #define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ #define F_NODE (1<<15) /* Node memory alloc*/ #define F_UDPCSUM (1<<16) /* Include UDP checksum */ +#define F_NO_TIMESTAMP (1<<17) /* Don't timestamp packets (default TS) */ /* Thread control flag bits */ #define T_STOP (1<<0) /* Stop run */ @@ -505,7 +506,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf, pktgen_reset_all_threads(pn); else - pr_warning("Unknown command: %s\n", data); + 
pr_warn("Unknown command: %s\n", data); return count; } @@ -638,6 +639,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->flags & F_UDPCSUM) seq_puts(seq, "UDPCSUM "); + if (pkt_dev->flags & F_NO_TIMESTAMP) + seq_puts(seq, "NO_TIMESTAMP "); + if (pkt_dev->flags & F_MPLS_RND) seq_puts(seq, "MPLS_RND "); @@ -857,14 +861,14 @@ static ssize_t pktgen_if_write(struct file *file, pg_result = &(pkt_dev->result[0]); if (count < 1) { - pr_warning("wrong command format\n"); + pr_warn("wrong command format\n"); return -EINVAL; } max = count; tmp = count_trail_chars(user_buffer, max); if (tmp < 0) { - pr_warning("illegal format\n"); + pr_warn("illegal format\n"); return tmp; } i = tmp; @@ -1243,6 +1247,9 @@ static ssize_t pktgen_if_write(struct file *file, else if (strcmp(f, "!UDPCSUM") == 0) pkt_dev->flags &= ~F_UDPCSUM; + else if (strcmp(f, "NO_TIMESTAMP") == 0) + pkt_dev->flags |= F_NO_TIMESTAMP; + else { sprintf(pg_result, "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", @@ -1251,6 +1258,7 @@ static ssize_t pktgen_if_write(struct file *file, "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, " "MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, " "QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, " + "NO_TIMESTAMP, " #ifdef CONFIG_XFRM "IPSEC, " #endif @@ -2048,15 +2056,15 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) ntxq = pkt_dev->odev->real_num_tx_queues; if (ntxq <= pkt_dev->queue_map_min) { - pr_warning("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", - pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, - pkt_dev->odevname); + pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", + pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, + pkt_dev->odevname); pkt_dev->queue_map_min = (ntxq ?: 1) - 1; } if (pkt_dev->queue_map_max >= ntxq) { - pr_warning("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", - pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, - pkt_dev->odevname); + pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", + pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, + pkt_dev->odevname); pkt_dev->queue_map_max = (ntxq ?: 1) - 1; } @@ -2685,9 +2693,14 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb, pgh->pgh_magic = htonl(PKTGEN_MAGIC); pgh->seq_num = htonl(pkt_dev->seq_num); - do_gettimeofday(×tamp); - pgh->tv_sec = htonl(timestamp.tv_sec); - pgh->tv_usec = htonl(timestamp.tv_usec); + if (pkt_dev->flags & F_NO_TIMESTAMP) { + pgh->tv_sec = 0; + pgh->tv_usec = 0; + } else { + do_gettimeofday(×tamp); + pgh->tv_sec = htonl(timestamp.tv_sec); + pgh->tv_usec = htonl(timestamp.tv_usec); + } } static struct sk_buff *pktgen_alloc_skb(struct net_device *dev, @@ -3160,8 +3173,8 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev) int nr_frags = pkt_dev->skb ? 
skb_shinfo(pkt_dev->skb)->nr_frags : -1; if (!pkt_dev->running) { - pr_warning("interface: %s is already stopped\n", - pkt_dev->odevname); + pr_warn("interface: %s is already stopped\n", + pkt_dev->odevname); return -EINVAL; } @@ -3285,10 +3298,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) static void pktgen_xmit(struct pktgen_dev *pkt_dev) { struct net_device *odev = pkt_dev->odev; - netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *) - = odev->netdev_ops->ndo_start_xmit; struct netdev_queue *txq; - u16 queue_map; int ret; /* If device is offline, then don't send */ @@ -3326,8 +3336,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) if (pkt_dev->delay && pkt_dev->last_ok) spin(pkt_dev, pkt_dev->next_tx); - queue_map = skb_get_queue_mapping(pkt_dev->skb); - txq = netdev_get_tx_queue(odev, queue_map); + txq = skb_get_tx_queue(odev, pkt_dev->skb); local_bh_disable(); @@ -3339,11 +3348,10 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) goto unlock; } atomic_inc(&(pkt_dev->skb->users)); - ret = (*xmit)(pkt_dev->skb, odev); + ret = netdev_start_xmit(pkt_dev->skb, odev, txq, false); switch (ret) { case NETDEV_TX_OK: - txq_trans_update(txq); pkt_dev->last_ok = 1; pkt_dev->sofar++; pkt_dev->seq_num++; @@ -3684,7 +3692,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, pr_debug("remove_device pkt_dev=%p\n", pkt_dev); if (pkt_dev->running) { - pr_warning("WARNING: trying to remove a running interface, stopping it now\n"); + pr_warn("WARNING: trying to remove a running interface, stopping it now\n"); pktgen_stop_device(pkt_dev); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index f0493e3b7471..a6882686ca3a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1481,9 +1481,12 @@ static int do_set_master(struct net_device *dev, int ifindex) return 0; } +#define DO_SETLINK_MODIFIED 0x01 +/* notify flag means notify + modified. 
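The status word that replaces the old `modified` boolean lets do_setlink() distinguish plain state changes from ones that must also emit a netlink notification; DO_SETLINK_NOTIFY (defined just below) deliberately contains the MODIFIED bit, so the cleanup path needs a single test, as the errout hunk further down shows:

	status |= DO_SETLINK_NOTIFY;		/* also sets DO_SETLINK_MODIFIED */

	if (status & DO_SETLINK_MODIFIED) {	/* anything committed at all? */
		if (status & DO_SETLINK_NOTIFY)	/* externally visible change */
			netdev_state_change(dev);
		...
	}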
*/ +#define DO_SETLINK_NOTIFY 0x03 static int do_setlink(const struct sk_buff *skb, struct net_device *dev, struct ifinfomsg *ifm, - struct nlattr **tb, char *ifname, int modified) + struct nlattr **tb, char *ifname, int status) { const struct net_device_ops *ops = dev->netdev_ops; int err; @@ -1502,7 +1505,7 @@ static int do_setlink(const struct sk_buff *skb, put_net(net); if (err) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_MAP]) { @@ -1531,7 +1534,7 @@ static int do_setlink(const struct sk_buff *skb, if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } if (tb[IFLA_ADDRESS]) { @@ -1551,19 +1554,19 @@ static int do_setlink(const struct sk_buff *skb, kfree(sa); if (err) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_MTU]) { err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU])); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_GROUP]) { dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); - modified = 1; + status |= DO_SETLINK_NOTIFY; } /* @@ -1575,7 +1578,7 @@ static int do_setlink(const struct sk_buff *skb, err = dev_change_name(dev, ifname); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_IFALIAS]) { @@ -1583,7 +1586,7 @@ static int do_setlink(const struct sk_buff *skb, nla_len(tb[IFLA_IFALIAS])); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } if (tb[IFLA_BROADCAST]) { @@ -1601,25 +1604,35 @@ static int do_setlink(const struct sk_buff *skb, err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER])); if (err) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_CARRIER]) { err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); if (err) goto errout; - modified = 1; + status |= DO_SETLINK_MODIFIED; } - if (tb[IFLA_TXQLEN]) - dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); + if (tb[IFLA_TXQLEN]) { + unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]); + + if (dev->tx_queue_len ^ value) + status |= DO_SETLINK_NOTIFY; + + dev->tx_queue_len = value; + } if (tb[IFLA_OPERSTATE]) set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); if (tb[IFLA_LINKMODE]) { + unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]); + write_lock_bh(&dev_base_lock); - dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); + if (dev->link_mode ^ value) + status |= DO_SETLINK_NOTIFY; + dev->link_mode = value; write_unlock_bh(&dev_base_lock); } @@ -1634,7 +1647,7 @@ static int do_setlink(const struct sk_buff *skb, err = do_setvfinfo(dev, attr); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } } err = 0; @@ -1664,7 +1677,7 @@ static int do_setlink(const struct sk_buff *skb, err = ops->ndo_set_vf_port(dev, vf, port); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } } err = 0; @@ -1682,7 +1695,7 @@ static int do_setlink(const struct sk_buff *skb, err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port); if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } if (tb[IFLA_AF_SPEC]) { @@ -1699,15 +1712,20 @@ static int do_setlink(const struct sk_buff *skb, if (err < 0) goto errout; - modified = 1; + status |= DO_SETLINK_NOTIFY; } } err = 0; errout: - if (err < 0 && modified) - net_warn_ratelimited("A link change request failed with some changes committed already. 
Interface %s may have been left with an inconsistent configuration, please check.\n", - dev->name); + if (status & DO_SETLINK_MODIFIED) { + if (status & DO_SETLINK_NOTIFY) + netdev_state_change(dev); + + if (err < 0) + net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n", + dev->name); + } return err; } @@ -1989,7 +2007,7 @@ replay: } if (dev) { - int modified = 0; + int status = 0; if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; @@ -2004,7 +2022,7 @@ replay: err = ops->changelink(dev, tb, data); if (err < 0) return err; - modified = 1; + status |= DO_SETLINK_NOTIFY; } if (linkinfo[IFLA_INFO_SLAVE_DATA]) { @@ -2015,10 +2033,10 @@ replay: tb, slave_data); if (err < 0) return err; - modified = 1; + status |= DO_SETLINK_NOTIFY; } - return do_setlink(skb, dev, ifm, tb, ifname, modified); + return do_setlink(skb, dev, ifm, tb, ifname, status); } if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index ba71212f0251..51dd3193a33e 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -35,7 +35,7 @@ static u32 seq_scale(u32 seq) * overlaps less than one time per MSL (2 minutes). * Choosing a clock of 64 ns period is OK. (period of 274 s) */ - return seq + (ktime_to_ns(ktime_get_real()) >> 6); + return seq + (ktime_get_real_ns() >> 6); } #endif @@ -135,7 +135,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, md5_transform(hash, net_secret); seq = hash[0] | (((u64)hash[1]) << 32); - seq += ktime_to_ns(ktime_get_real()); + seq += ktime_get_real_ns(); seq &= (1ull << 48) - 1; return seq; @@ -163,7 +163,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr, md5_transform(hash, secret); seq = hash[0] | (((u64)hash[1]) << 32); - seq += ktime_to_ns(ktime_get_real()); + seq += ktime_get_real_ns(); seq &= (1ull << 48) - 1; return seq; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index da1378a3e2c7..06a8feb10099 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3491,32 +3491,66 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(sock_queue_err_skb); -void __skb_tstamp_tx(struct sk_buff *orig_skb, - struct skb_shared_hwtstamps *hwtstamps, - struct sock *sk, int tstype) +struct sk_buff *sock_dequeue_err_skb(struct sock *sk) { - struct sock_exterr_skb *serr; - struct sk_buff *skb; - int err; + struct sk_buff_head *q = &sk->sk_error_queue; + struct sk_buff *skb, *skb_next; + int err = 0; - if (!sk) - return; + spin_lock_bh(&q->lock); + skb = __skb_dequeue(q); + if (skb && (skb_next = skb_peek(q))) + err = SKB_EXT_ERR(skb_next)->ee.ee_errno; + spin_unlock_bh(&q->lock); - if (hwtstamps) { - *skb_hwtstamps(orig_skb) = - *hwtstamps; - } else { - /* - * no hardware time stamps available, - * so keep the shared tx_flags and only - * store software time stamp - */ - orig_skb->tstamp = ktime_get_real(); + sk->sk_err = err; + if (err) + sk->sk_error_report(sk); + + return skb; +} +EXPORT_SYMBOL(sock_dequeue_err_skb); + +/** + * skb_clone_sk - create clone of skb, and take reference to socket + * @skb: the skb to clone + * + * This function creates a clone of a buffer that holds a reference on + * sk_refcnt. Buffers created via this function are meant to be + * returned using sock_queue_err_skb, or free via kfree_skb. 
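skb_clone_sk() is one half of a pairing aimed at drivers with hardware TX timestamping: clone at transmit time while the socket is still referenced, then complete from the TX-done path via skb_complete_tx_timestamp(), defined just below. A sketch of that driver pattern; all foo_* names are hypothetical:

static void foo_tx_maybe_timestamp(struct foo_priv *priv, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		priv->tstamp_skb = skb_clone_sk(skb);	/* may return NULL */
}

static void foo_tx_timestamp_done(struct foo_priv *priv, u64 ns)
{
	struct skb_shared_hwtstamps hwts = { .hwtstamp = ns_to_ktime(ns) };
	struct sk_buff *skb = priv->tstamp_skb;

	if (!skb)
		return;
	priv->tstamp_skb = NULL;

	/* Consumes the clone: queued on sk_error_queue or freed. */
	skb_complete_tx_timestamp(skb, &hwts);
}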
+ * + * When passing buffers allocated with this function to sock_queue_err_skb + * it is necessary to wrap the call with sock_hold/sock_put in order to + * prevent the socket from being released prior to being enqueued on + * the sk_error_queue. + */ +struct sk_buff *skb_clone_sk(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sk_buff *clone; + + if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) + return NULL; + + clone = skb_clone(skb, GFP_ATOMIC); + if (!clone) { + sock_put(sk); + return NULL; } - skb = skb_clone(orig_skb, GFP_ATOMIC); - if (!skb) - return; + clone->sk = sk; + clone->destructor = sock_efree; + + return clone; +} +EXPORT_SYMBOL(skb_clone_sk); + +static void __skb_complete_tx_timestamp(struct sk_buff *skb, + struct sock *sk, + int tstype) +{ + struct sock_exterr_skb *serr; + int err; serr = SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); @@ -3534,6 +3568,42 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, if (err) kfree_skb(skb); } + +void skb_complete_tx_timestamp(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps) +{ + struct sock *sk = skb->sk; + + /* take a reference to prevent skb_orphan() from freeing the socket */ + sock_hold(sk); + + *skb_hwtstamps(skb) = *hwtstamps; + __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); + + sock_put(sk); +} +EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); + +void __skb_tstamp_tx(struct sk_buff *orig_skb, + struct skb_shared_hwtstamps *hwtstamps, + struct sock *sk, int tstype) +{ + struct sk_buff *skb; + + if (!sk) + return; + + if (hwtstamps) + *skb_hwtstamps(orig_skb) = *hwtstamps; + else + orig_skb->tstamp = ktime_get_real(); + + skb = skb_clone(orig_skb, GFP_ATOMIC); + if (!skb) + return; + + __skb_complete_tx_timestamp(skb, sk, tstype); +} EXPORT_SYMBOL_GPL(__skb_tstamp_tx); void skb_tstamp_tx(struct sk_buff *orig_skb, @@ -3558,9 +3628,14 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; + /* take a reference to prevent skb_orphan() from freeing the socket */ + sock_hold(sk); + err = sock_queue_err_skb(sk, skb); if (err) kfree_skb(skb); + + sock_put(sk); } EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); @@ -3861,7 +3936,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, return false; if (len <= skb_tailroom(to)) { - BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); + if (len) + BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); *delta_truesize = 0; return true; } @@ -4026,3 +4102,81 @@ err_free: return NULL; } EXPORT_SYMBOL(skb_vlan_untag); + +/** + * alloc_skb_with_frags - allocate skb with page frags + * + * header_len: size of linear part + * data_len: needed length in frags + * max_page_order: max page order desired. + * errcode: pointer to error code if any + * gfp_mask: allocation mask + * + * This can be used to allocate a paged skb, given a maximal order for frags. + */ +struct sk_buff *alloc_skb_with_frags(unsigned long header_len, + unsigned long data_len, + int max_page_order, + int *errcode, + gfp_t gfp_mask) +{ + int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + unsigned long chunk; + struct sk_buff *skb; + struct page *page; + gfp_t gfp_head; + int i; + + *errcode = -EMSGSIZE; + /* Note this test could be relaxed, if we succeed to allocate + * high order pages... 
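Typical use of the new helper mirrors the sock_alloc_send_pskb() rewrite further below; a sketch with concrete numbers, assuming 4 KiB pages so that 64 KiB of frag space stays within MAX_SKB_FRAGS:

	int err;
	struct sk_buff *skb;

	/* 128-byte linear part plus 64 KiB of page frags, preferring
	 * order-3 pages; on failure err holds -EMSGSIZE or -ENOBUFS.
	 */
	skb = alloc_skb_with_frags(128, 64 * 1024, 3, &err, GFP_KERNEL);
	if (!skb)
		return err;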
+ */ + if (npages > MAX_SKB_FRAGS) + return NULL; + + gfp_head = gfp_mask; + if (gfp_head & __GFP_WAIT) + gfp_head |= __GFP_REPEAT; + + *errcode = -ENOBUFS; + skb = alloc_skb(header_len, gfp_head); + if (!skb) + return NULL; + + skb->truesize += npages << PAGE_SHIFT; + + for (i = 0; npages > 0; i++) { + int order = max_page_order; + + while (order) { + if (npages >= 1 << order) { + page = alloc_pages(gfp_mask | + __GFP_COMP | + __GFP_NOWARN | + __GFP_NORETRY, + order); + if (page) + goto fill_page; + /* Do not retry other high order allocations */ + order = 1; + max_page_order = 0; + } + order--; + } + page = alloc_page(gfp_mask); + if (!page) + goto failure; +fill_page: + chunk = min_t(unsigned long, data_len, + PAGE_SIZE << order); + skb_fill_page_desc(skb, i, page, 0, chunk); + data_len -= chunk; + npages -= 1 << order; + } + return skb; + +failure: + kfree_skb(skb); + return NULL; +} +EXPORT_SYMBOL(alloc_skb_with_frags); diff --git a/net/core/sock.c b/net/core/sock.c index 9c3f823e76a9..e5ad7d31c3c2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -437,7 +437,6 @@ static void sock_disable_timestamp(struct sock *sk, unsigned long flags) int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; - int skb_len; unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; @@ -459,13 +458,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) skb->dev = NULL; skb_set_owner_r(skb, sk); - /* Cache the SKB length before we tack it onto the receive - * queue. Once it is added it no longer belongs to us and - * may be freed by other threads of control pulling packets - * from the queue. - */ - skb_len = skb->len; - /* we escape from rcu protected region, make sure we dont leak * a norefcounted dst */ @@ -1645,18 +1637,24 @@ void sock_rfree(struct sk_buff *skb) } EXPORT_SYMBOL(sock_rfree); +void sock_efree(struct sk_buff *skb) +{ + sock_put(skb->sk); +} +EXPORT_SYMBOL(sock_efree); + +#ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb) { struct sock *sk = skb->sk; -#ifdef CONFIG_INET if (sk->sk_state == TCP_TIME_WAIT) inet_twsk_put(inet_twsk(sk)); else -#endif sock_put(sk); } EXPORT_SYMBOL(sock_edemux); +#endif kuid_t sock_i_uid(struct sock *sk) { @@ -1764,21 +1762,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, unsigned long data_len, int noblock, int *errcode, int max_page_order) { - struct sk_buff *skb = NULL; - unsigned long chunk; - gfp_t gfp_mask; + struct sk_buff *skb; long timeo; int err; - int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; - struct page *page; - int i; - - err = -EMSGSIZE; - if (npages > MAX_SKB_FRAGS) - goto failure; timeo = sock_sndtimeo(sk, noblock); - while (!skb) { + for (;;) { err = sock_error(sk); if (err != 0) goto failure; @@ -1787,66 +1776,27 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, if (sk->sk_shutdown & SEND_SHUTDOWN) goto failure; - if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) { - set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - err = -EAGAIN; - if (!timeo) - goto failure; - if (signal_pending(current)) - goto interrupted; - timeo = sock_wait_for_wmem(sk, timeo); - continue; - } - - err = -ENOBUFS; - gfp_mask = sk->sk_allocation; - if (gfp_mask & __GFP_WAIT) - gfp_mask |= __GFP_REPEAT; + if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) + break; - skb = alloc_skb(header_len, gfp_mask); - if (!skb) + set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + set_bit(SOCK_NOSPACE, 
&sk->sk_socket->flags); + err = -EAGAIN; + if (!timeo) goto failure; - - skb->truesize += data_len; - - for (i = 0; npages > 0; i++) { - int order = max_page_order; - - while (order) { - if (npages >= 1 << order) { - page = alloc_pages(sk->sk_allocation | - __GFP_COMP | - __GFP_NOWARN | - __GFP_NORETRY, - order); - if (page) - goto fill_page; - /* Do not retry other high order allocations */ - order = 1; - max_page_order = 0; - } - order--; - } - page = alloc_page(sk->sk_allocation); - if (!page) - goto failure; -fill_page: - chunk = min_t(unsigned long, data_len, - PAGE_SIZE << order); - skb_fill_page_desc(skb, i, page, 0, chunk); - data_len -= chunk; - npages -= 1 << order; - } + if (signal_pending(current)) + goto interrupted; + timeo = sock_wait_for_wmem(sk, timeo); } - - skb_set_owner_w(skb, sk); + skb = alloc_skb_with_frags(header_len, data_len, max_page_order, + errcode, sk->sk_allocation); + if (skb) + skb_set_owner_w(skb, sk); return skb; interrupted: err = sock_intr_errno(timeo); failure: - kfree_skb(skb); *errcode = err; return NULL; } @@ -2498,11 +2448,11 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, int type) { struct sock_exterr_skb *serr; - struct sk_buff *skb, *skb2; + struct sk_buff *skb; int copied, err; err = -EAGAIN; - skb = skb_dequeue(&sk->sk_error_queue); + skb = sock_dequeue_err_skb(sk); if (skb == NULL) goto out; @@ -2523,16 +2473,6 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, msg->msg_flags |= MSG_ERRQUEUE; err = copied; - /* Reset and regenerate socket error */ - spin_lock_bh(&sk->sk_error_queue.lock); - sk->sk_err = 0; - if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { - sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; - spin_unlock_bh(&sk->sk_error_queue.lock); - sk->sk_error_report(sk); - } else - spin_unlock_bh(&sk->sk_error_queue.lock); - out_free_skb: kfree_skb(skb); out: diff --git a/net/core/timestamping.c b/net/core/timestamping.c index a8770391ea5b..43d3dd62fcc8 100644 --- a/net/core/timestamping.c +++ b/net/core/timestamping.c @@ -36,10 +36,9 @@ void skb_clone_tx_timestamp(struct sk_buff *skb) { struct phy_device *phydev; struct sk_buff *clone; - struct sock *sk = skb->sk; unsigned int type; - if (!sk) + if (!skb->sk) return; type = classify(skb); @@ -48,50 +47,14 @@ void skb_clone_tx_timestamp(struct sk_buff *skb) phydev = skb->dev->phydev; if (likely(phydev->drv->txtstamp)) { - if (!atomic_inc_not_zero(&sk->sk_refcnt)) + clone = skb_clone_sk(skb); + if (!clone) return; - - clone = skb_clone(skb, GFP_ATOMIC); - if (!clone) { - sock_put(sk); - return; - } - - clone->sk = sk; phydev->drv->txtstamp(phydev, clone, type); } } EXPORT_SYMBOL_GPL(skb_clone_tx_timestamp); -void skb_complete_tx_timestamp(struct sk_buff *skb, - struct skb_shared_hwtstamps *hwtstamps) -{ - struct sock *sk = skb->sk; - struct sock_exterr_skb *serr; - int err; - - if (!hwtstamps) { - sock_put(sk); - kfree_skb(skb); - return; - } - - *skb_hwtstamps(skb) = *hwtstamps; - - serr = SKB_EXT_ERR(skb); - memset(serr, 0, sizeof(*serr)); - serr->ee.ee_errno = ENOMSG; - serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; - skb->sk = NULL; - - err = sock_queue_err_skb(sk, skb); - - sock_put(sk); - if (err) - kfree_skb(skb); -} -EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); - bool skb_defer_rx_timestamp(struct sk_buff *skb) { struct phy_device *phydev; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index ae011b46c071..25733d538147 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -127,6 +127,7 @@ 
Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat #include <linux/stat.h> #include <linux/init.h> #include <linux/poll.h> +#include <linux/jiffies.h> #include <net/net_namespace.h> #include <net/neighbour.h> #include <net/dst.h> @@ -598,7 +599,7 @@ int dn_destroy_timer(struct sock *sk) if (sk->sk_socket) return 0; - if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) { + if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) { dn_unhash_sock(sk); sock_put(sk); return 1; diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 3b726f31c64c..4400da7739da 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -41,6 +41,7 @@ #include <linux/sysctl.h> #include <linux/notifier.h> #include <linux/slab.h> +#include <linux/jiffies.h> #include <asm/uaccess.h> #include <net/net_namespace.h> #include <net/neighbour.h> @@ -875,7 +876,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa) static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa) { /* First check time since device went up */ - if ((jiffies - dn_db->uptime) < DRDELAY) + if (time_before(jiffies, dn_db->uptime + DRDELAY)) return 0; /* If there is no router, then yes... */ diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c index d9c150cc59a9..1d330fd43dc7 100644 --- a/net/decnet/dn_timer.c +++ b/net/decnet/dn_timer.c @@ -23,6 +23,7 @@ #include <linux/spinlock.h> #include <net/sock.h> #include <linux/atomic.h> +#include <linux/jiffies.h> #include <net/flow.h> #include <net/dn.h> @@ -91,7 +92,7 @@ static void dn_slow_timer(unsigned long arg) * since the last successful transmission. */ if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) { - if ((jiffies - scp->stamp) >= scp->keepalive) + if (time_after_eq(jiffies, scp->stamp + scp->keepalive)) scp->keepalive_fxn(sk); } diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index f5eede1d6cb8..a585fd6352eb 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -12,6 +12,9 @@ config NET_DSA if NET_DSA # tagging formats +config NET_DSA_TAG_BRCM + bool + config NET_DSA_TAG_DSA bool diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 7b9fcbbeda5d..da06ed1df620 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_NET_DSA) += dsa_core.o dsa_core-y += dsa.o slave.o # tagging formats +dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 0a49632fac47..6905f2d84c44 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -10,7 +10,6 @@ */ #include <linux/list.h> -#include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> @@ -44,7 +43,7 @@ void unregister_switch_driver(struct dsa_switch_driver *drv) EXPORT_SYMBOL_GPL(unregister_switch_driver); static struct dsa_switch_driver * -dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name) +dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name) { struct dsa_switch_driver *ret; struct list_head *list; @@ -59,7 +58,7 @@ dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name) drv = list_entry(list, struct dsa_switch_driver, list); - name = drv->probe(bus, sw_addr); + name = drv->probe(host_dev, sw_addr); if (name != NULL) { ret = drv; break; @@ -76,7 +75,7 @@ dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name) /* basic switch operations 
**************************************************/ static struct dsa_switch * dsa_switch_setup(struct dsa_switch_tree *dst, int index, - struct device *parent, struct mii_bus *bus) + struct device *parent, struct device *host_dev) { struct dsa_chip_data *pd = dst->pd->chip + index; struct dsa_switch_driver *drv; @@ -89,7 +88,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, /* * Probe for switch model. */ - drv = dsa_switch_probe(bus, pd->sw_addr, &name); + drv = dsa_switch_probe(host_dev, pd->sw_addr, &name); if (drv == NULL) { printk(KERN_ERR "%s[%d]: could not detect attached switch\n", dst->master_netdev->name, index); @@ -110,8 +109,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, ds->index = index; ds->pd = dst->pd->chip + index; ds->drv = drv; - ds->master_mii_bus = bus; - + ds->master_dev = host_dev; /* * Validate supplied switch configuration. @@ -144,14 +142,44 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, goto out; } + /* Make the built-in MII bus mask match the number of ports, + * switch drivers can override this later + */ + ds->phys_mii_mask = ds->phys_port_mask; + /* * If the CPU connects to this switch, set the switch tree * tagging protocol to the preferred tagging format of this * switch. */ - if (ds->dst->cpu_switch == index) - ds->dst->tag_protocol = drv->tag_protocol; + if (dst->cpu_switch == index) { + switch (drv->tag_protocol) { +#ifdef CONFIG_NET_DSA_TAG_DSA + case DSA_TAG_PROTO_DSA: + dst->rcv = dsa_netdev_ops.rcv; + break; +#endif +#ifdef CONFIG_NET_DSA_TAG_EDSA + case DSA_TAG_PROTO_EDSA: + dst->rcv = edsa_netdev_ops.rcv; + break; +#endif +#ifdef CONFIG_NET_DSA_TAG_TRAILER + case DSA_TAG_PROTO_TRAILER: + dst->rcv = trailer_netdev_ops.rcv; + break; +#endif +#ifdef CONFIG_NET_DSA_TAG_BRCM + case DSA_TAG_PROTO_BRCM: + dst->rcv = brcm_netdev_ops.rcv; + break; +#endif + default: + break; + } + dst->tag_protocol = drv->tag_protocol; + } /* * Do basic register setup. 
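With reception funnelled through a single ETH_P_XDSA packet_type, each tagging format reduces to an xmit/rcv pair; this is the shape a tagger exports (sketch; the example_* functions stand in for a real implementation such as tag_brcm.c below, using the dsa_device_ops structure this series adds to dsa_priv.h):

static const struct dsa_device_ops example_netdev_ops = {
	.xmit	= example_tag_xmit,	/* insert tag, queue on master netdev */
	.rcv	= example_tag_rcv,	/* strip tag, demux to the slave port */
};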
@@ -210,6 +238,49 @@ static void dsa_switch_destroy(struct dsa_switch *ds) { } +static int dsa_switch_suspend(struct dsa_switch *ds) +{ + int i, ret = 0; + + /* Suspend slave network devices */ + for (i = 0; i < DSA_MAX_PORTS; i++) { + if (!(ds->phys_port_mask & (1 << i))) + continue; + + ret = dsa_slave_suspend(ds->ports[i]); + if (ret) + return ret; + } + + if (ds->drv->suspend) + ret = ds->drv->suspend(ds); + + return ret; +} + +static int dsa_switch_resume(struct dsa_switch *ds) +{ + int i, ret = 0; + + if (ds->drv->resume) + ret = ds->drv->resume(ds); + + if (ret) + return ret; + + /* Resume slave network devices */ + for (i = 0; i < DSA_MAX_PORTS; i++) { + if (!(ds->phys_port_mask & (1 << i))) + continue; + + ret = dsa_slave_resume(ds->ports[i]); + if (ret) + return ret; + } + + return 0; +} + /* link polling *************************************************************/ static void dsa_link_poll_work(struct work_struct *ugly) @@ -256,7 +327,7 @@ static struct device *dev_find_class(struct device *parent, char *class) return device_find_child(parent, class, dev_is_class); } -static struct mii_bus *dev_to_mii_bus(struct device *dev) +struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev) { struct device *d; @@ -272,6 +343,7 @@ static struct mii_bus *dev_to_mii_bus(struct device *dev) return NULL; } +EXPORT_SYMBOL_GPL(dsa_host_dev_to_mii_bus); static struct net_device *dev_to_net_device(struct device *dev) { @@ -410,7 +482,8 @@ static int dsa_of_probe(struct platform_device *pdev) chip_index++; cd = &pd->chip[chip_index]; - cd->mii_bus = &mdio_bus->dev; + cd->of_node = child; + cd->host_dev = &mdio_bus->dev; sw_addr = of_get_property(child, "reg", NULL); if (!sw_addr) @@ -431,6 +504,8 @@ static int dsa_of_probe(struct platform_device *pdev) if (!port_name) continue; + cd->port_dn[port_index] = port; + cd->port_names[port_index] = kstrdup(port_name, GFP_KERNEL); if (!cd->port_names[port_index]) { @@ -534,17 +609,9 @@ static int dsa_probe(struct platform_device *pdev) dst->cpu_port = -1; for (i = 0; i < pd->nr_chips; i++) { - struct mii_bus *bus; struct dsa_switch *ds; - bus = dev_to_mii_bus(pd->chip[i].mii_bus); - if (bus == NULL) { - printk(KERN_ERR "%s[%d]: no mii bus found for " - "dsa switch\n", dev->name, i); - continue; - } - - ds = dsa_switch_setup(dst, i, &pdev->dev, bus); + ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev); if (IS_ERR(ds)) { printk(KERN_ERR "%s[%d]: couldn't create dsa switch " "instance (error %ld)\n", dev->name, i, @@ -608,7 +675,62 @@ static void dsa_shutdown(struct platform_device *pdev) { } +static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + struct dsa_switch_tree *dst = dev->dsa_ptr; + + if (unlikely(dst == NULL)) { + kfree_skb(skb); + return 0; + } + + return dst->rcv(skb, dev, pt, orig_dev); +} + +static struct packet_type dsa_pack_type __read_mostly = { + .type = cpu_to_be16(ETH_P_XDSA), + .func = dsa_switch_rcv, +}; + +#ifdef CONFIG_PM_SLEEP +static int dsa_suspend(struct device *d) +{ + struct platform_device *pdev = to_platform_device(d); + struct dsa_switch_tree *dst = platform_get_drvdata(pdev); + int i, ret = 0; + + for (i = 0; i < dst->pd->nr_chips; i++) { + struct dsa_switch *ds = dst->ds[i]; + + if (ds != NULL) + ret = dsa_switch_suspend(ds); + } + + return ret; +} + +static int dsa_resume(struct device *d) +{ + struct platform_device *pdev = to_platform_device(d); + struct dsa_switch_tree *dst = platform_get_drvdata(pdev); + int i, ret = 0; + + 
for (i = 0; i < dst->pd->nr_chips; i++) { + struct dsa_switch *ds = dst->ds[i]; + + if (ds != NULL) + ret = dsa_switch_resume(ds); + } + + return ret; +} +#endif + +static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume); + static const struct of_device_id dsa_of_match_table[] = { + { .compatible = "brcm,bcm7445-switch-v4.0" }, { .compatible = "marvell,dsa", }, {} }; @@ -622,6 +744,7 @@ static struct platform_driver dsa_driver = { .name = "dsa", .owner = THIS_MODULE, .of_match_table = dsa_of_match_table, + .pm = &dsa_pm_ops, }, }; @@ -633,30 +756,15 @@ static int __init dsa_init_module(void) if (rc) return rc; -#ifdef CONFIG_NET_DSA_TAG_DSA - dev_add_pack(&dsa_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_EDSA - dev_add_pack(&edsa_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_TRAILER - dev_add_pack(&trailer_packet_type); -#endif + dev_add_pack(&dsa_pack_type); + return 0; } module_init(dsa_init_module); static void __exit dsa_cleanup_module(void) { -#ifdef CONFIG_NET_DSA_TAG_TRAILER - dev_remove_pack(&trailer_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_EDSA - dev_remove_pack(&edsa_packet_type); -#endif -#ifdef CONFIG_NET_DSA_TAG_DSA - dev_remove_pack(&dsa_packet_type); -#endif + dev_remove_pack(&dsa_pack_type); platform_driver_unregister(&dsa_driver); } module_exit(dsa_cleanup_module); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index d4cf5cc747e3..dc9756d3154c 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -12,7 +12,13 @@ #define __DSA_PRIV_H #include <linux/phy.h> -#include <net/dsa.h> +#include <linux/netdevice.h> + +struct dsa_device_ops { + netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); + int (*rcv)(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev); +}; struct dsa_slave_priv { /* @@ -20,6 +26,8 @@ struct dsa_slave_priv { * switch port. */ struct net_device *dev; + netdev_tx_t (*xmit)(struct sk_buff *skb, + struct net_device *dev); /* * Which switch this port is a part of, and the port index @@ -33,28 +41,35 @@ struct dsa_slave_priv { * to this port. 
*/ struct phy_device *phy; + phy_interface_t phy_interface; + int old_link; + int old_pause; + int old_duplex; }; /* dsa.c */ extern char dsa_driver_version[]; /* slave.c */ +extern const struct dsa_device_ops notag_netdev_ops; void dsa_slave_mii_bus_init(struct dsa_switch *ds); struct net_device *dsa_slave_create(struct dsa_switch *ds, struct device *parent, int port, char *name); +int dsa_slave_suspend(struct net_device *slave_dev); +int dsa_slave_resume(struct net_device *slave_dev); /* tag_dsa.c */ -netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev); -extern struct packet_type dsa_packet_type; +extern const struct dsa_device_ops dsa_netdev_ops; /* tag_edsa.c */ -netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev); -extern struct packet_type edsa_packet_type; +extern const struct dsa_device_ops edsa_netdev_ops; /* tag_trailer.c */ -netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev); -extern struct packet_type trailer_packet_type; +extern const struct dsa_device_ops trailer_netdev_ops; + +/* tag_brcm.c */ +extern const struct dsa_device_ops brcm_netdev_ops; #endif diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 45a1e34c89e0..43c1e4ade689 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -9,9 +9,10 @@ */ #include <linux/list.h> -#include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/phy.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> #include "dsa_priv.h" /* slave mii_bus handling ***************************************************/ @@ -19,7 +20,7 @@ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg) { struct dsa_switch *ds = bus->priv; - if (ds->phys_port_mask & (1 << addr)) + if (ds->phys_mii_mask & (1 << addr)) return ds->drv->phy_read(ds, addr, reg); return 0xffff; @@ -29,7 +30,7 @@ static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val) { struct dsa_switch *ds = bus->priv; - if (ds->phys_port_mask & (1 << addr)) + if (ds->phys_mii_mask & (1 << addr)) return ds->drv->phy_write(ds, addr, reg, val); return 0; @@ -43,7 +44,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds) ds->slave_mii_bus->write = dsa_slave_phy_write; snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x", ds->index, ds->pd->sw_addr); - ds->slave_mii_bus->parent = &ds->master_mii_bus->dev; + ds->slave_mii_bus->parent = ds->master_dev; } @@ -171,6 +172,24 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } +static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + + return p->xmit(skb, dev); +} + +static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + + skb->dev = p->parent->dst->master_netdev; + dev_queue_xmit(skb); + + return NETDEV_TX_OK; +} + /* ethtool operations *******************************************************/ static int @@ -282,6 +301,27 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset) return -EOPNOTSUPP; } +static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->get_wol) + ds->drv->get_wol(ds, p->port, w); +} + +static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret = -EOPNOTSUPP; + + if 
(ds->drv->set_wol) + ret = ds->drv->set_wol(ds, p->port, w); + + return ret; +} + static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_settings = dsa_slave_get_settings, .set_settings = dsa_slave_set_settings, @@ -291,46 +331,141 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_strings = dsa_slave_get_strings, .get_ethtool_stats = dsa_slave_get_ethtool_stats, .get_sset_count = dsa_slave_get_sset_count, + .set_wol = dsa_slave_set_wol, + .get_wol = dsa_slave_get_wol, }; -#ifdef CONFIG_NET_DSA_TAG_DSA -static const struct net_device_ops dsa_netdev_ops = { +static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_init = dsa_slave_init, .ndo_open = dsa_slave_open, .ndo_stop = dsa_slave_close, - .ndo_start_xmit = dsa_xmit, + .ndo_start_xmit = dsa_slave_xmit, .ndo_change_rx_flags = dsa_slave_change_rx_flags, .ndo_set_rx_mode = dsa_slave_set_rx_mode, .ndo_set_mac_address = dsa_slave_set_mac_address, .ndo_do_ioctl = dsa_slave_ioctl, }; -#endif -#ifdef CONFIG_NET_DSA_TAG_EDSA -static const struct net_device_ops edsa_netdev_ops = { - .ndo_init = dsa_slave_init, - .ndo_open = dsa_slave_open, - .ndo_stop = dsa_slave_close, - .ndo_start_xmit = edsa_xmit, - .ndo_change_rx_flags = dsa_slave_change_rx_flags, - .ndo_set_rx_mode = dsa_slave_set_rx_mode, - .ndo_set_mac_address = dsa_slave_set_mac_address, - .ndo_do_ioctl = dsa_slave_ioctl, -}; -#endif -#ifdef CONFIG_NET_DSA_TAG_TRAILER -static const struct net_device_ops trailer_netdev_ops = { - .ndo_init = dsa_slave_init, - .ndo_open = dsa_slave_open, - .ndo_stop = dsa_slave_close, - .ndo_start_xmit = trailer_xmit, - .ndo_change_rx_flags = dsa_slave_change_rx_flags, - .ndo_set_rx_mode = dsa_slave_set_rx_mode, - .ndo_set_mac_address = dsa_slave_set_mac_address, - .ndo_do_ioctl = dsa_slave_ioctl, -}; -#endif + +static void dsa_slave_adjust_link(struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + unsigned int status_changed = 0; + + if (p->old_link != p->phy->link) { + status_changed = 1; + p->old_link = p->phy->link; + } + + if (p->old_duplex != p->phy->duplex) { + status_changed = 1; + p->old_duplex = p->phy->duplex; + } + + if (p->old_pause != p->phy->pause) { + status_changed = 1; + p->old_pause = p->phy->pause; + } + + if (ds->drv->adjust_link && status_changed) + ds->drv->adjust_link(ds, p->port, p->phy); + + if (status_changed) + phy_print_status(p->phy); +} + +static int dsa_slave_fixed_link_update(struct net_device *dev, + struct fixed_phy_status *status) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->fixed_link_update) + ds->drv->fixed_link_update(ds, p->port, status); + + return 0; +} /* slave device setup *******************************************************/ +static void dsa_slave_phy_setup(struct dsa_slave_priv *p, + struct net_device *slave_dev) +{ + struct dsa_switch *ds = p->parent; + struct dsa_chip_data *cd = ds->pd; + struct device_node *phy_dn, *port_dn; + bool phy_is_fixed = false; + u32 phy_flags = 0; + int ret; + + port_dn = cd->port_dn[p->port]; + p->phy_interface = of_get_phy_mode(port_dn); + + phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); + if (of_phy_is_fixed_link(port_dn)) { + /* In the case of a fixed PHY, the DT node associated + * to the fixed PHY is the Port DT node + */ + ret = of_phy_register_fixed_link(port_dn); + if (ret) { + pr_err("failed to register fixed PHY\n"); + return; + } + phy_is_fixed = true; + phy_dn = port_dn; + } + + if (ds->drv->get_phy_flags) + 
phy_flags = ds->drv->get_phy_flags(ds, p->port); + + if (phy_dn) + p->phy = of_phy_connect(slave_dev, phy_dn, + dsa_slave_adjust_link, phy_flags, + p->phy_interface); + + if (p->phy && phy_is_fixed) + fixed_phy_set_link_update(p->phy, dsa_slave_fixed_link_update); + + /* We could not connect to a designated PHY, so use the switch internal + * MDIO bus instead + */ + if (!p->phy) + p->phy = ds->slave_mii_bus->phy_map[p->port]; + else + pr_info("attached PHY at address %d [%s]\n", + p->phy->addr, p->phy->drv->name); +} + +int dsa_slave_suspend(struct net_device *slave_dev) +{ + struct dsa_slave_priv *p = netdev_priv(slave_dev); + + netif_device_detach(slave_dev); + + if (p->phy) { + phy_stop(p->phy); + p->old_pause = -1; + p->old_link = -1; + p->old_duplex = -1; + phy_suspend(p->phy); + } + + return 0; +} + +int dsa_slave_resume(struct net_device *slave_dev) +{ + struct dsa_slave_priv *p = netdev_priv(slave_dev); + + netif_device_attach(slave_dev); + + if (p->phy) { + phy_resume(p->phy); + phy_start(p->phy); + } + + return 0; +} + struct net_device * dsa_slave_create(struct dsa_switch *ds, struct device *parent, int port, char *name) @@ -349,35 +484,48 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; eth_hw_addr_inherit(slave_dev, master); slave_dev->tx_queue_len = 0; + slave_dev->netdev_ops = &dsa_slave_netdev_ops; + + SET_NETDEV_DEV(slave_dev, parent); + slave_dev->dev.of_node = ds->pd->port_dn[port]; + slave_dev->vlan_features = master->vlan_features; + + p = netdev_priv(slave_dev); + p->dev = slave_dev; + p->parent = ds; + p->port = port; switch (ds->dst->tag_protocol) { #ifdef CONFIG_NET_DSA_TAG_DSA - case htons(ETH_P_DSA): - slave_dev->netdev_ops = &dsa_netdev_ops; + case DSA_TAG_PROTO_DSA: + p->xmit = dsa_netdev_ops.xmit; break; #endif #ifdef CONFIG_NET_DSA_TAG_EDSA - case htons(ETH_P_EDSA): - slave_dev->netdev_ops = &edsa_netdev_ops; + case DSA_TAG_PROTO_EDSA: + p->xmit = edsa_netdev_ops.xmit; break; #endif #ifdef CONFIG_NET_DSA_TAG_TRAILER - case htons(ETH_P_TRAILER): - slave_dev->netdev_ops = &trailer_netdev_ops; + case DSA_TAG_PROTO_TRAILER: + p->xmit = trailer_netdev_ops.xmit; + break; +#endif +#ifdef CONFIG_NET_DSA_TAG_BRCM + case DSA_TAG_PROTO_BRCM: + p->xmit = brcm_netdev_ops.xmit; break; #endif default: - BUG(); + p->xmit = dsa_slave_notag_xmit; + break; } - SET_NETDEV_DEV(slave_dev, parent); - slave_dev->vlan_features = master->vlan_features; + p->old_pause = -1; + p->old_link = -1; + p->old_duplex = -1; - p = netdev_priv(slave_dev); - p->dev = slave_dev; - p->parent = ds; - p->port = port; - p->phy = ds->slave_mii_bus->phy_map[port]; + dsa_slave_phy_setup(p, slave_dev); ret = register_netdev(slave_dev); if (ret) { @@ -390,6 +538,9 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, netif_carrier_off(slave_dev); if (p->phy != NULL) { + if (ds->drv->get_phy_flags(ds, port)) + p->phy->dev_flags |= ds->drv->get_phy_flags(ds, port); + phy_attach(slave_dev, dev_name(&p->phy->dev), PHY_INTERFACE_MODE_GMII); diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c new file mode 100644 index 000000000000..83d3572cdb20 --- /dev/null +++ b/net/dsa/tag_brcm.c @@ -0,0 +1,171 @@ +/* + * Broadcom tag support + * + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/etherdevice.h> +#include <linux/list.h> +#include <linux/slab.h> +#include "dsa_priv.h" + +/* This tag is 4 bytes long; older 6-byte tags are not + * handled + */ +#define BRCM_TAG_LEN 4 + +/* The tag is constructed and deconstructed using byte-by-byte access + * because the tag is placed after the MAC Source Address, which does + * not make it 4-byte aligned, so this might cause unaligned accesses + * on most systems where this is used. + */ + +/* Ingress and egress opcodes */ +#define BRCM_OPCODE_SHIFT 5 +#define BRCM_OPCODE_MASK 0x7 + +/* Ingress fields */ +/* 1st byte in the tag */ +#define BRCM_IG_TC_SHIFT 2 +#define BRCM_IG_TC_MASK 0x7 +/* 2nd byte in the tag */ +#define BRCM_IG_TE_MASK 0x3 +#define BRCM_IG_TS_SHIFT 7 +/* 3rd byte in the tag */ +#define BRCM_IG_DSTMAP2_MASK 1 +#define BRCM_IG_DSTMAP1_MASK 0xff + +/* Egress fields */ + +/* 2nd byte in the tag */ +#define BRCM_EG_CID_MASK 0xff + +/* 3rd byte in the tag */ +#define BRCM_EG_RC_MASK 0xff +#define BRCM_EG_RC_RSVD (3 << 6) +#define BRCM_EG_RC_EXCEPTION (1 << 5) +#define BRCM_EG_RC_PROT_SNOOP (1 << 4) +#define BRCM_EG_RC_PROT_TERM (1 << 3) +#define BRCM_EG_RC_SWITCH (1 << 2) +#define BRCM_EG_RC_MAC_LEARN (1 << 1) +#define BRCM_EG_RC_MIRROR (1 << 0) +#define BRCM_EG_TC_SHIFT 5 +#define BRCM_EG_TC_MASK 0x7 +#define BRCM_EG_PID_MASK 0x1f + +static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + u8 *brcm_tag; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + if (skb_cow_head(skb, BRCM_TAG_LEN) < 0) + goto out_free; + + skb_push(skb, BRCM_TAG_LEN); + + memmove(skb->data, skb->data + BRCM_TAG_LEN, 2 * ETH_ALEN); + + /* Build the tag after the MAC Source Address */ + brcm_tag = skb->data + 2 * ETH_ALEN; + + /* Set the ingress opcode, traffic class, tag enforcement is + * deprecated + */ + brcm_tag[0] = (1 << BRCM_OPCODE_SHIFT) | + ((skb->priority << BRCM_IG_TC_SHIFT) & BRCM_IG_TC_MASK); + brcm_tag[1] = 0; + brcm_tag[2] = 0; + if (p->port == 8) + brcm_tag[2] = BRCM_IG_DSTMAP2_MASK; + brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK; + + /* Queue the SKB for transmission on the parent interface, but + * do not modify its EtherType + */ + skb->dev = p->parent->dst->master_netdev; + dev_queue_xmit(skb); + + return NETDEV_TX_OK; + +out_free: + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, struct net_device *orig_dev) +{ + struct dsa_switch_tree *dst = dev->dsa_ptr; + struct dsa_switch *ds; + int source_port; + u8 *brcm_tag; + + if (unlikely(dst == NULL)) + goto out_drop; + + ds = dst->ds[0]; + + skb = skb_unshare(skb, GFP_ATOMIC); + if (skb == NULL) + goto out; + + if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN))) + goto out_drop; + + /* skb->data points to the EtherType; the tag is right before it */ + brcm_tag = skb->data - 2; + + /* The opcode should never be different from 0b000 */ + if (unlikely((brcm_tag[0] >> BRCM_OPCODE_SHIFT) & BRCM_OPCODE_MASK)) + goto out_drop; + + /* We should never see a reserved reason code without knowing how to + * handle it + */ + WARN_ON(brcm_tag[2] & BRCM_EG_RC_RSVD); + + /* Locate which port this is coming from */ + source_port = brcm_tag[3] & BRCM_EG_PID_MASK; + + /* Validate the source port against the switch setup */ + if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL) + goto out_drop; + + /* Remove Broadcom tag and update checksum */ + 
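+	/* Pictorially (skb->data is at the EtherType on entry):
+	 *
+	 *	before: [ DA (6) ][ SA (6) ][ BRCM tag (4) ][ Type ][ data ]
+	 *	after:  [ DA (6) ][ SA (6) ][ Type ][ data ]
+	 *
+	 * so stripping the tag is a pull plus sliding DA/SA up by
+	 * BRCM_TAG_LEN, mirroring the memmove() in brcm_tag_xmit().
+	 */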
skb_pull_rcsum(skb, BRCM_TAG_LEN); + + /* Move the Ethernet DA and SA */ + memmove(skb->data - ETH_HLEN, + skb->data - ETH_HLEN - BRCM_TAG_LEN, + 2 * ETH_ALEN); + + skb_push(skb, ETH_HLEN); + skb->pkt_type = PACKET_HOST; + skb->dev = ds->ports[source_port]; + skb->protocol = eth_type_trans(skb, skb->dev); + + skb->dev->stats.rx_packets++; + skb->dev->stats.rx_bytes += skb->len; + + netif_receive_skb(skb); + + return 0; + +out_drop: + kfree_skb(skb); +out: + return 0; +} + +const struct dsa_device_ops brcm_netdev_ops = { + .xmit = brcm_tag_xmit, + .rcv = brcm_tag_rcv, +}; diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index cacce1e22f9c..ce90c8bdc658 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -10,13 +10,12 @@ #include <linux/etherdevice.h> #include <linux/list.h> -#include <linux/netdevice.h> #include <linux/slab.h> #include "dsa_priv.h" #define DSA_HLEN 4 -netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); u8 *dsa_header; @@ -186,7 +185,7 @@ out: return 0; } -struct packet_type dsa_packet_type __read_mostly = { - .type = cpu_to_be16(ETH_P_DSA), - .func = dsa_rcv, +const struct dsa_device_ops dsa_netdev_ops = { + .xmit = dsa_xmit, + .rcv = dsa_rcv, }; diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index e70c43c25e64..94fcce778679 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -10,14 +10,13 @@ #include <linux/etherdevice.h> #include <linux/list.h> -#include <linux/netdevice.h> #include <linux/slab.h> #include "dsa_priv.h" #define DSA_HLEN 4 #define EDSA_HLEN 8 -netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); u8 *edsa_header; @@ -205,7 +204,7 @@ out: return 0; } -struct packet_type edsa_packet_type __read_mostly = { - .type = cpu_to_be16(ETH_P_EDSA), - .func = edsa_rcv, +const struct dsa_device_ops edsa_netdev_ops = { + .xmit = edsa_xmit, + .rcv = edsa_rcv, }; diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 94bc260d015d..115fdca34077 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -10,11 +10,10 @@ #include <linux/etherdevice.h> #include <linux/list.h> -#include <linux/netdevice.h> #include <linux/slab.h> #include "dsa_priv.h" -netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); struct sk_buff *nskb; @@ -114,7 +113,7 @@ out: return 0; } -struct packet_type trailer_packet_type __read_mostly = { - .type = cpu_to_be16(ETH_P_TRAILER), - .func = trailer_rcv, +const struct dsa_device_ops trailer_netdev_ops = { + .xmit = trailer_xmit, + .rcv = trailer_rcv, }; diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index f405e0592407..33a140e15834 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -146,6 +146,33 @@ int eth_rebuild_header(struct sk_buff *skb) EXPORT_SYMBOL(eth_rebuild_header); /** + * eth_get_headlen - determine the length of the header for an Ethernet frame + * @data: pointer to start of frame + * @len: total length of frame + * + * Make a best effort attempt to pull the length for all of the headers for + * a given frame in a linear buffer. 
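+ * A typical consumer is a receive path that copies only the headers
+ * into the skb linear area and leaves the payload in page fragments;
+ * illustratively (RX_HDR_LEN and va being the driver's own buffer
+ * bookkeeping, not part of this API):
+ *
+ *	pull_len = eth_get_headlen(va, RX_HDR_LEN);
+ *	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));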
+ */ +u32 eth_get_headlen(void *data, unsigned int len) +{ + const struct ethhdr *eth = (const struct ethhdr *)data; + struct flow_keys keys; + + /* this should never happen, but better safe than sorry */ + if (len < sizeof(*eth)) + return len; + + /* parse any remaining L2/L3 headers, check for L4 */ + if (!__skb_flow_dissect(NULL, &keys, data, + eth->h_proto, sizeof(*eth), len)) + return max_t(u32, keys.thoff, sizeof(*eth)); + + /* parse for any L4 headers */ + return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len); +} +EXPORT_SYMBOL(eth_get_headlen); + +/** * eth_type_trans - determine the packet's protocol ID. * @skb: received socket data * @dev: receiving network device @@ -181,11 +208,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) * variants has been configured on the receiving interface, * and if so, set skb->protocol without looking at the packet. */ - if (unlikely(netdev_uses_dsa_tags(dev))) - return htons(ETH_P_DSA); - - if (unlikely(netdev_uses_trailer_tags(dev))) - return htons(ETH_P_TRAILER); + if (unlikely(netdev_uses_dsa(dev))) - return htons(ETH_P_XDSA); if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN)) return eth->h_proto; diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c index 6591d27e53a4..5e788cdc499a 100644 --- a/net/ieee802154/6lowpan_rtnl.c +++ b/net/ieee802154/6lowpan_rtnl.c @@ -77,14 +77,6 @@ lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) return netdev_priv(dev); } -static inline void lowpan_address_flip(u8 *src, u8 *dest) -{ - int i; - - for (i = 0; i < IEEE802154_ADDR_LEN; i++) - (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i]; -} - static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, const void *_saddr, unsigned int len) diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index dbc10d84161f..84f710b7472a 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -311,6 +311,16 @@ config NET_UDP_TUNNEL tristate default n +config NET_FOU + tristate "IP: Foo (IP protocols) over UDP" + select XFRM + select NET_UDP_TUNNEL + ---help--- + Foo over UDP allows any IP protocol to be directly encapsulated + over UDP, including tunnels (IPIP, GRE, SIT). By encapsulating in UDP, + network mechanisms and optimizations for UDP (such as ECMP + and RSS) can be leveraged to provide better service. + config INET_AH tristate "IP: AH transformation" select XFRM_ALGO diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 8ee1cd4053ee..d78d404c596f 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o obj-$(CONFIG_IP_MROUTE) += ipmr.o obj-$(CONFIG_NET_IPIP) += ipip.o gre-y := gre_demux.o +obj-$(CONFIG_NET_FOU) += fou.o obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o obj-$(CONFIG_NET_IPGRE) += ip_gre.o obj-$(CONFIG_NET_UDP_TUNNEL) += udp_tunnel.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index d156b3c5f363..72011cc4c13b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -418,10 +418,6 @@ int inet_release(struct socket *sock) } EXPORT_SYMBOL(inet_release); -/* It is off by default, see below. 
*/ -int sysctl_ip_nonlocal_bind __read_mostly; -EXPORT_SYMBOL(sysctl_ip_nonlocal_bind); - int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; @@ -461,7 +457,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) * is temporarily down) */ err = -EADDRNOTAVAIL; - if (!sysctl_ip_nonlocal_bind && + if (!net->ipv4.sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && addr->sin_addr.s_addr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && @@ -1670,6 +1666,8 @@ static const struct net_offload ipip_offload = { .callbacks = { .gso_send_check = inet_gso_send_check, .gso_segment = inet_gso_segment, + .gro_receive = inet_gro_receive, + .gro_complete = inet_gro_complete, }, }; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 255aa9946fe7..23104a3f2924 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -243,7 +243,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos, int oif, struct net_device *dev, int rpf, struct in_device *idev, u32 *itag) { - int ret, no_addr, accept_local; + int ret, no_addr; struct fib_result res; struct flowi4 fl4; struct net *net; @@ -258,16 +258,17 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, no_addr = idev->ifa_list == NULL; - accept_local = IN_DEV_ACCEPT_LOCAL(idev); fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0; net = dev_net(dev); if (fib_lookup(net, &fl4, &res)) goto last_resort; - if (res.type != RTN_UNICAST) { - if (res.type != RTN_LOCAL || !accept_local) - goto e_inval; - } + if (res.type != RTN_UNICAST && + (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev))) + goto e_inval; + if (!rpf && !fib_num_tclassid_users(dev_net(dev)) && + (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) + goto last_resort; fib_combine_itag(itag, &res); dev_match = false; @@ -321,6 +322,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, int r = secpath_exists(skb) ? 
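+	/* In outline, the validation above is reverse-path filtering:
+	 * route-look-up the packet's *source* address and, in strict mode,
+	 * require the result to point back out the ingress device. A
+	 * condensed sketch (FIB_RES_DEV() per include/net/ip_fib.h; the
+	 * dev_match logic above handles multipath nexthops):
+	 *
+	 *	fl4.daddr = src;
+	 *	if (fib_lookup(net, &fl4, &res) == 0 &&
+	 *	    res.type == RTN_UNICAST && FIB_RES_DEV(res) == dev)
+	 *		return 0;	(source reachable via ingress dev)
+	 */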
0 : IN_DEV_RPFILTER(idev); if (!r && !fib_num_tclassid_users(dev_net(dev)) && + IN_DEV_ACCEPT_LOCAL(idev) && (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) { *itag = 0; return 0; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index b10cd43a4722..5b6efb3d2308 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -157,9 +157,12 @@ static void rt_fibinfo_free(struct rtable __rcu **rtp) static void free_nh_exceptions(struct fib_nh *nh) { - struct fnhe_hash_bucket *hash = nh->nh_exceptions; + struct fnhe_hash_bucket *hash; int i; + hash = rcu_dereference_protected(nh->nh_exceptions, 1); + if (!hash) + return; for (i = 0; i < FNHE_HASH_SIZE; i++) { struct fib_nh_exception *fnhe; @@ -205,8 +208,7 @@ static void free_fib_info_rcu(struct rcu_head *head) change_nexthops(fi) { if (nexthop_nh->nh_dev) dev_put(nexthop_nh->nh_dev); - if (nexthop_nh->nh_exceptions) - free_nh_exceptions(nexthop_nh); + free_nh_exceptions(nexthop_nh); rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output); rt_fibinfo_free(&nexthop_nh->nh_rth_input); } endfor_nexthops(fi); diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c new file mode 100644 index 000000000000..dced89fbe480 --- /dev/null +++ b/net/ipv4/fou.c @@ -0,0 +1,368 @@ +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/socket.h> +#include <linux/skbuff.h> +#include <linux/ip.h> +#include <linux/udp.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <net/genetlink.h> +#include <net/ip.h> +#include <net/protocol.h> +#include <net/udp.h> +#include <net/udp_tunnel.h> +#include <net/xfrm.h> +#include <uapi/linux/fou.h> +#include <uapi/linux/genetlink.h> + +static DEFINE_SPINLOCK(fou_lock); +static LIST_HEAD(fou_list); + +struct fou { + struct socket *sock; + u8 protocol; + u16 port; + struct udp_offload udp_offloads; + struct list_head list; +}; + +struct fou_cfg { + u8 protocol; + struct udp_port_cfg udp_config; +}; + +static inline struct fou *fou_from_sock(struct sock *sk) +{ + return sk->sk_user_data; +} + +static int fou_udp_encap_recv_deliver(struct sk_buff *skb, + u8 protocol, size_t len) +{ + struct iphdr *iph = ip_hdr(skb); + + /* Remove 'len' bytes from the packet (UDP header and + * FOU header if present), modify the protocol to the one + * we found, and then call rcv_encap. 
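+ * Returning minus the protocol number is the encap_rcv convention:
+ * the UDP receive path hands the negative value back up so that
+ * ip_local_deliver_finish() re-dispatches the now-decapsulated packet
+ * to that inet protocol handler, roughly:
+ *
+ *	ret = ipprot->handler(skb);
+ *	if (ret < 0) {
+ *		protocol = -ret;
+ *		goto resubmit;
+ *	}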
+ */ + iph->tot_len = htons(ntohs(iph->tot_len) - len); + __skb_pull(skb, len); + skb_postpull_rcsum(skb, udp_hdr(skb), len); + skb_reset_transport_header(skb); + + return -protocol; +} + +static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) +{ + struct fou *fou = fou_from_sock(sk); + + if (!fou) + return 1; + + return fou_udp_encap_recv_deliver(skb, fou->protocol, + sizeof(struct udphdr)); +} + +static struct sk_buff **fou_gro_receive(struct sk_buff **head, + struct sk_buff *skb, + const struct net_offload **offloads) +{ + const struct net_offload *ops; + struct sk_buff **pp = NULL; + u8 proto = NAPI_GRO_CB(skb)->proto; + + rcu_read_lock(); + ops = rcu_dereference(offloads[proto]); + if (!ops || !ops->callbacks.gro_receive) + goto out_unlock; + + pp = ops->callbacks.gro_receive(head, skb); + +out_unlock: + rcu_read_unlock(); + + return pp; +} + +static int fou_gro_complete(struct sk_buff *skb, int nhoff, + const struct net_offload **offloads) +{ + const struct net_offload *ops; + u8 proto = NAPI_GRO_CB(skb)->proto; + int err = -ENOSYS; + + rcu_read_lock(); + ops = rcu_dereference(offloads[proto]); + if (WARN_ON(!ops || !ops->callbacks.gro_complete)) + goto out_unlock; + + err = ops->callbacks.gro_complete(skb, nhoff); + +out_unlock: + rcu_read_unlock(); + + return err; +} + +static struct sk_buff **fou4_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + return fou_gro_receive(head, skb, inet_offloads); +} + +static int fou4_gro_complete(struct sk_buff *skb, int nhoff) +{ + return fou_gro_complete(skb, nhoff, inet_offloads); +} + +static struct sk_buff **fou6_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + return fou_gro_receive(head, skb, inet6_offloads); +} + +static int fou6_gro_complete(struct sk_buff *skb, int nhoff) +{ + return fou_gro_complete(skb, nhoff, inet6_offloads); +} + +static int fou_add_to_port_list(struct fou *fou) +{ + struct fou *fout; + + spin_lock(&fou_lock); + list_for_each_entry(fout, &fou_list, list) { + if (fou->port == fout->port) { + spin_unlock(&fou_lock); + return -EALREADY; + } + } + + list_add(&fou->list, &fou_list); + spin_unlock(&fou_lock); + + return 0; +} + +static void fou_release(struct fou *fou) +{ + struct socket *sock = fou->sock; + struct sock *sk = sock->sk; + + udp_del_offload(&fou->udp_offloads); + + list_del(&fou->list); + + /* Remove hooks into tunnel socket */ + sk->sk_user_data = NULL; + + sock_release(sock); + + kfree(fou); +} + +static int fou_create(struct net *net, struct fou_cfg *cfg, + struct socket **sockp) +{ + struct fou *fou = NULL; + int err; + struct socket *sock = NULL; + struct sock *sk; + + /* Open UDP socket */ + err = udp_sock_create(net, &cfg->udp_config, &sock); + if (err < 0) + goto error; + + /* Allocate FOU port structure */ + fou = kzalloc(sizeof(*fou), GFP_KERNEL); + if (!fou) { + err = -ENOMEM; + goto error; + } + + sk = sock->sk; + + /* Mark socket as an encapsulation socket. 
See net/ipv4/udp.c */ + fou->protocol = cfg->protocol; + fou->port = cfg->udp_config.local_udp_port; + udp_sk(sk)->encap_rcv = fou_udp_recv; + + udp_sk(sk)->encap_type = 1; + udp_encap_enable(); + + sk->sk_user_data = fou; + fou->sock = sock; + + udp_set_convert_csum(sk, true); + + sk->sk_allocation = GFP_ATOMIC; + + switch (cfg->udp_config.family) { + case AF_INET: + fou->udp_offloads.callbacks.gro_receive = fou4_gro_receive; + fou->udp_offloads.callbacks.gro_complete = fou4_gro_complete; + break; + case AF_INET6: + fou->udp_offloads.callbacks.gro_receive = fou6_gro_receive; + fou->udp_offloads.callbacks.gro_complete = fou6_gro_complete; + break; + default: + err = -EPFNOSUPPORT; + goto error; + } + + fou->udp_offloads.port = cfg->udp_config.local_udp_port; + fou->udp_offloads.ipproto = cfg->protocol; + + if (cfg->udp_config.family == AF_INET) { + err = udp_add_offload(&fou->udp_offloads); + if (err) + goto error; + } + + err = fou_add_to_port_list(fou); + if (err) + goto error; + + if (sockp) + *sockp = sock; + + return 0; + +error: + kfree(fou); + if (sock) + sock_release(sock); + + return err; +} + +static int fou_destroy(struct net *net, struct fou_cfg *cfg) +{ + struct fou *fou; + u16 port = cfg->udp_config.local_udp_port; + int err = -EINVAL; + + spin_lock(&fou_lock); + list_for_each_entry(fou, &fou_list, list) { + if (fou->port == port) { + udp_del_offload(&fou->udp_offloads); + fou_release(fou); + err = 0; + break; + } + } + spin_unlock(&fou_lock); + + return err; +} + +static struct genl_family fou_nl_family = { + .id = GENL_ID_GENERATE, + .hdrsize = 0, + .name = FOU_GENL_NAME, + .version = FOU_GENL_VERSION, + .maxattr = FOU_ATTR_MAX, + .netnsok = true, +}; + +static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = { + [FOU_ATTR_PORT] = { .type = NLA_U16, }, + [FOU_ATTR_AF] = { .type = NLA_U8, }, + [FOU_ATTR_IPPROTO] = { .type = NLA_U8, }, +}; + +static int parse_nl_config(struct genl_info *info, + struct fou_cfg *cfg) +{ + memset(cfg, 0, sizeof(*cfg)); + + cfg->udp_config.family = AF_INET; + + if (info->attrs[FOU_ATTR_AF]) { + u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]); + + if (family != AF_INET && family != AF_INET6) + return -EINVAL; + + cfg->udp_config.family = family; + } + + if (info->attrs[FOU_ATTR_PORT]) { + u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]); + + cfg->udp_config.local_udp_port = port; + } + + if (info->attrs[FOU_ATTR_IPPROTO]) + cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]); + + return 0; +} + +static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info) +{ + struct fou_cfg cfg; + int err; + + err = parse_nl_config(info, &cfg); + if (err) + return err; + + return fou_create(&init_net, &cfg, NULL); +} + +static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info) +{ + struct fou_cfg cfg; + + parse_nl_config(info, &cfg); + + return fou_destroy(&init_net, &cfg); +} + +static const struct genl_ops fou_nl_ops[] = { + { + .cmd = FOU_CMD_ADD, + .doit = fou_nl_cmd_add_port, + .policy = fou_nl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = FOU_CMD_DEL, + .doit = fou_nl_cmd_rm_port, + .policy = fou_nl_policy, + .flags = GENL_ADMIN_PERM, + }, +}; + +static int __init fou_init(void) +{ + int ret; + + ret = genl_register_family_with_ops(&fou_nl_family, + fou_nl_ops); + + return ret; +} + +static void __exit fou_fini(void) +{ + struct fou *fou, *next; + + genl_unregister_family(&fou_nl_family); + + /* Close all the FOU sockets */ + + spin_lock(&fou_lock); + list_for_each_entry_safe(fou, next, &fou_list, list) + 
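+	/* With the generic netlink family above registered, a FOU receive
+	 * port is configured from userspace; with an iproute2 recent
+	 * enough to carry FOU support this corresponds to commands of the
+	 * form (addresses and port illustrative):
+	 *
+	 *	ip fou add port 5555 ipproto 4
+	 *	ip link add name ipip1 type ipip remote 10.0.0.2 \
+	 *		encap fou encap-sport auto encap-dport 5555
+	 *
+	 * FOU_CMD_ADD/FOU_CMD_DEL carry FOU_ATTR_PORT, FOU_ATTR_AF and
+	 * FOU_ATTR_IPPROTO, exactly as parsed in parse_nl_config().
+	 */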
fou_release(fou); + spin_unlock(&fou_lock); +} + +module_init(fou_init); +module_exit(fou_fini); +MODULE_AUTHOR("Tom Herbert <therbert@google.com>"); +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index 0485bf7f8f03..4a7b5b2a1ce3 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -98,7 +98,6 @@ EXPORT_SYMBOL_GPL(gre_build_header); static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, bool *csum_err) { - unsigned int ip_hlen = ip_hdrlen(skb); const struct gre_base_hdr *greh; __be32 *options; int hdr_len; @@ -106,7 +105,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) return -EINVAL; - greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); + greh = (struct gre_base_hdr *)skb_transport_header(skb); if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) return -EINVAL; @@ -116,7 +115,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, if (!pskb_may_pull(skb, hdr_len)) return -EINVAL; - greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen); + greh = (struct gre_base_hdr *)skb_transport_header(skb); tpi->proto = greh->protocol; options = (__be32 *)(greh + 1); @@ -125,6 +124,10 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, *csum_err = true; return -EINVAL; } + + skb_checksum_try_convert(skb, IPPROTO_GRE, 0, + null_compute_pseudo); + options++; } diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 6556263c8fa5..d3fe2ac05167 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -119,28 +119,6 @@ out: return segs; } -/* Compute the whole skb csum in s/w and store it, then verify GRO csum - * starting from gro_offset. - */ -static __sum16 gro_skb_checksum(struct sk_buff *skb) -{ - __sum16 sum; - - skb->csum = skb_checksum(skb, 0, skb->len, 0); - NAPI_GRO_CB(skb)->csum = csum_sub(skb->csum, - csum_partial(skb->data, skb_gro_offset(skb), 0)); - sum = csum_fold(NAPI_GRO_CB(skb)->csum); - if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) { - if (unlikely(!sum) && !skb->csum_complete_sw) - netdev_rx_csum_fault(skb->dev); - } else { - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum_complete_sw = 1; - } - - return sum; -} - static struct sk_buff **gre_gro_receive(struct sk_buff **head, struct sk_buff *skb) { @@ -192,22 +170,16 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, if (unlikely(!greh)) goto out_unlock; } - if (greh->flags & GRE_CSUM) { /* Need to verify GRE csum first */ - __sum16 csum = 0; - - if (skb->ip_summed == CHECKSUM_COMPLETE) - csum = csum_fold(NAPI_GRO_CB(skb)->csum); - /* Don't trust csum error calculated/reported by h/w */ - if (skb->ip_summed == CHECKSUM_NONE || csum != 0) - csum = gro_skb_checksum(skb); - - /* GRE CSUM is the 1's complement of the 1's complement sum - * of the GRE hdr plus payload so it should add up to 0xffff - * (and 0 after csum_fold()) just like the IPv4 hdr csum. - */ - if (csum) + + /* Don't bother verifying checksum if we're going to flush anyway. 
*/ + if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) { + if (skb_gro_checksum_simple_validate(skb)) goto out_unlock; + + skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0, + null_compute_pseudo); } + flush = 0; for (p = *head; p; p = p->next) { diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index f10eab462282..4146153d875d 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -117,7 +117,7 @@ #define IGMP_V2_Unsolicited_Report_Interval (10*HZ) #define IGMP_V3_Unsolicited_Report_Interval (1*HZ) #define IGMP_Query_Response_Interval (10*HZ) -#define IGMP_Unsolicited_Report_Count 2 +#define IGMP_Query_Robustness_Variable 2 #define IGMP_Initial_Report_Delay (1) @@ -756,8 +756,7 @@ static void igmp_ifc_event(struct in_device *in_dev) { if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) return; - in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + in_dev->mr_ifc_count = in_dev->mr_qrv ?: sysctl_igmp_qrv; igmp_ifc_start_timer(in_dev, 1); } @@ -1086,8 +1085,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) pmc->interface = im->interface; in_dev_hold(in_dev); pmc->multiaddr = im->multiaddr; - pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; pmc->sfmode = im->sfmode; if (pmc->sfmode == MCAST_INCLUDE) { struct ip_sf_list *psf; @@ -1226,8 +1224,7 @@ static void igmp_group_added(struct ip_mc_list *im) } /* else, v3 */ - im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + im->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; igmp_ifc_event(in_dev); #endif } @@ -1322,7 +1319,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) spin_lock_init(&im->lock); #ifdef CONFIG_IP_MULTICAST setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im); - im->unsolicit_count = IGMP_Unsolicited_Report_Count; + im->unsolicit_count = sysctl_igmp_qrv; #endif im->next_rcu = in_dev->mc_list; @@ -1460,7 +1457,7 @@ void ip_mc_init_dev(struct in_device *in_dev) (unsigned long)in_dev); setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, (unsigned long)in_dev); - in_dev->mr_qrv = IGMP_Unsolicited_Report_Count; + in_dev->mr_qrv = sysctl_igmp_qrv; #endif spin_lock_init(&in_dev->mc_tomb_lock); @@ -1474,6 +1471,9 @@ void ip_mc_up(struct in_device *in_dev) ASSERT_RTNL(); +#ifdef CONFIG_IP_MULTICAST + in_dev->mr_qrv = sysctl_igmp_qrv; +#endif ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); for_each_pmc_rtnl(in_dev, pmc) @@ -1540,7 +1540,9 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) */ int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS; int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF; - +#ifdef CONFIG_IP_MULTICAST +int sysctl_igmp_qrv __read_mostly = IGMP_Query_Robustness_Variable; +#endif static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, __be32 *psfsrc) @@ -1575,8 +1577,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, #ifdef CONFIG_IP_MULTICAST if (psf->sf_oldin && !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) { - psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + psf->sf_crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; psf->sf_next = pmc->tomb; pmc->tomb = psf; rv = 1; @@ -1639,8 +1640,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, /* filter mode change */ pmc->sfmode = MCAST_INCLUDE; #ifdef CONFIG_IP_MULTICAST - pmc->crcount = in_dev->mr_qrv ? 
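+	/* The "x ?: y" form adopted in these conversions is the GNU C
+	 * conditional with omitted middle operand: it yields x when x is
+	 * nonzero and y otherwise, evaluating x only once, so
+	 *
+	 *	in_dev->mr_qrv ?: sysctl_igmp_qrv
+	 *
+	 * picks the interface's robustness variable when set and falls
+	 * back to the new sysctl default (IGMP_Query_Robustness_Variable,
+	 * i.e. 2).
+	 */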
in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; in_dev->mr_ifc_count = pmc->crcount; for (psf = pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; @@ -1818,8 +1818,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, #ifdef CONFIG_IP_MULTICAST /* else no filters; keep old mode for reports */ - pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : - IGMP_Unsolicited_Report_Count; + pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv; in_dev->mr_ifc_count = pmc->crcount; for (psf = pmc->sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; @@ -2539,7 +2538,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v) querier = "NONE"; #endif - if (rcu_dereference(state->in_dev->mc_list) == im) { + if (rcu_access_pointer(state->in_dev->mc_list) == im) { seq_printf(seq, "%d\t%-10s: %5d %7s\n", state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier); } diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 43116e8c8e13..9111a4e22155 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -229,7 +229,7 @@ begin: } } else if (score == hiscore && reuseport) { matches++; - if (((u64)phash * matches) >> 32 == 0) + if (reciprocal_scale(phash, matches) == 0) result = sk; phash = next_pseudo_random32(phash); } diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index bd5f5928167d..241afd743d2c 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -72,29 +72,10 @@ void inet_peer_base_init(struct inet_peer_base *bp) { bp->root = peer_avl_empty_rcu; seqlock_init(&bp->lock); - bp->flush_seq = ~0U; bp->total = 0; } EXPORT_SYMBOL_GPL(inet_peer_base_init); -static atomic_t v4_seq = ATOMIC_INIT(0); -static atomic_t v6_seq = ATOMIC_INIT(0); - -static atomic_t *inetpeer_seq_ptr(int family) -{ - return (family == AF_INET ? &v4_seq : &v6_seq); -} - -static inline void flush_check(struct inet_peer_base *base, int family) -{ - atomic_t *fp = inetpeer_seq_ptr(family); - - if (unlikely(base->flush_seq != atomic_read(fp))) { - inetpeer_invalidate_tree(base); - base->flush_seq = atomic_read(fp); - } -} - #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ /* Exported for sysctl_net_ipv4. */ @@ -444,8 +425,6 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, unsigned int sequence; int invalidated, gccnt = 0; - flush_check(base, daddr->family); - /* Attempt a lockless lookup first. * Because of a concurrent writer, we might not find an existing entry. */ diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 9b842544aea3..829aff8bf723 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -239,7 +239,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, tpi.seq = htonl(tunnel->o_seqno); /* Push GRE header. 
*/ - gre_build_header(skb, &tpi, tunnel->hlen); + gre_build_header(skb, &tpi, tunnel->tun_hlen); ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); } @@ -310,7 +310,7 @@ out: static int ipgre_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - int err = 0; + int err; struct ip_tunnel_parm p; if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) @@ -470,13 +470,18 @@ static void ipgre_tunnel_setup(struct net_device *dev) static void __gre_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel; + int t_hlen; tunnel = netdev_priv(dev); - tunnel->hlen = ip_gre_calc_hlen(tunnel->parms.o_flags); + tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags); tunnel->parms.iph.protocol = IPPROTO_GRE; - dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4; - dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4; + tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; + + t_hlen = tunnel->hlen + sizeof(struct iphdr); + + dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; + dev->mtu = ETH_DATA_LEN - t_hlen - 4; dev->features |= GRE_FEATURES; dev->hw_features |= GRE_FEATURES; @@ -628,6 +633,40 @@ static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[], parms->iph.frag_off = htons(IP_DF); } +/* This function returns true when ENCAP attributes are present in the nl msg */ +static bool ipgre_netlink_encap_parms(struct nlattr *data[], + struct ip_tunnel_encap *ipencap) +{ + bool ret = false; + + memset(ipencap, 0, sizeof(*ipencap)); + + if (!data) + return ret; + + if (data[IFLA_GRE_ENCAP_TYPE]) { + ret = true; + ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]); + } + + if (data[IFLA_GRE_ENCAP_FLAGS]) { + ret = true; + ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]); + } + + if (data[IFLA_GRE_ENCAP_SPORT]) { + ret = true; + ipencap->sport = nla_get_u16(data[IFLA_GRE_ENCAP_SPORT]); + } + + if (data[IFLA_GRE_ENCAP_DPORT]) { + ret = true; + ipencap->dport = nla_get_u16(data[IFLA_GRE_ENCAP_DPORT]); + } + + return ret; +} + static int gre_tap_init(struct net_device *dev) { __gre_tunnel_init(dev); @@ -657,6 +696,15 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; + + if (ipgre_netlink_encap_parms(data, &ipencap)) { + struct ip_tunnel *t = netdev_priv(dev); + int err = ip_tunnel_encap_setup(t, &ipencap); + + if (err < 0) + return err; + } ipgre_netlink_parms(data, tb, &p); return ip_tunnel_newlink(dev, tb, &p); @@ -666,6 +714,15 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; + + if (ipgre_netlink_encap_parms(data, &ipencap)) { + struct ip_tunnel *t = netdev_priv(dev); + int err = ip_tunnel_encap_setup(t, &ipencap); + + if (err < 0) + return err; + } ipgre_netlink_parms(data, tb, &p); return ip_tunnel_changelink(dev, tb, &p); @@ -694,6 +751,14 @@ static size_t ipgre_get_size(const struct net_device *dev) nla_total_size(1) + /* IFLA_GRE_PMTUDISC */ nla_total_size(1) + + /* IFLA_GRE_ENCAP_TYPE */ + nla_total_size(2) + + /* IFLA_GRE_ENCAP_FLAGS */ + nla_total_size(2) + + /* IFLA_GRE_ENCAP_SPORT */ + nla_total_size(2) + + /* IFLA_GRE_ENCAP_DPORT */ + nla_total_size(2) + 0; } @@ -714,6 +779,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)))) goto nla_put_failure; + + if (nla_put_u16(skb, 
IFLA_GRE_ENCAP_TYPE, + t->encap.type) || + nla_put_u16(skb, IFLA_GRE_ENCAP_SPORT, + t->encap.sport) || + nla_put_u16(skb, IFLA_GRE_ENCAP_DPORT, + t->encap.dport) || + nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS, + t->encap.flags)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -731,6 +807,10 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { [IFLA_GRE_TTL] = { .type = NLA_U8 }, [IFLA_GRE_TOS] = { .type = NLA_U8 }, [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 }, + [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 }, + [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 }, + [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 }, + [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, }; static struct rtnl_link_ops ipgre_link_ops __read_mostly = { diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 5cb830c78990..c373a9ad4555 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -303,7 +303,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, } /* dont let ip_call_ra_chain() use sk again */ ra->sk = NULL; - rcu_assign_pointer(*rap, ra->next); + RCU_INIT_POINTER(*rap, ra->next); spin_unlock_bh(&ip_ra_lock); if (ra->destructor) @@ -325,7 +325,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, new_ra->sk = sk; new_ra->destructor = destructor; - new_ra->next = ra; + RCU_INIT_POINTER(new_ra->next, ra); rcu_assign_pointer(*rap, new_ra); sock_hold(sk); spin_unlock_bh(&ip_ra_lock); @@ -405,7 +405,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { struct sock_exterr_skb *serr; - struct sk_buff *skb, *skb2; + struct sk_buff *skb; DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); struct { struct sock_extended_err ee; @@ -415,7 +415,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) int copied; err = -EAGAIN; - skb = skb_dequeue(&sk->sk_error_queue); + skb = sock_dequeue_err_skb(sk); if (skb == NULL) goto out; @@ -462,17 +462,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_ERRQUEUE; err = copied; - /* Reset and regenerate socket error */ - spin_lock_bh(&sk->sk_error_queue.lock); - sk->sk_err = 0; - skb2 = skb_peek(&sk->sk_error_queue); - if (skb2 != NULL) { - sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; - spin_unlock_bh(&sk->sk_error_queue.lock); - sk->sk_error_report(sk); - } else - spin_unlock_bh(&sk->sk_error_queue.lock); - out_free_skb: kfree_skb(skb); out: diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index bd41dd1948b6..b75b47b0a223 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -55,6 +55,7 @@ #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> +#include <net/udp.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> @@ -487,6 +488,91 @@ drop: } EXPORT_SYMBOL_GPL(ip_tunnel_rcv); +static int ip_encap_hlen(struct ip_tunnel_encap *e) +{ + switch (e->type) { + case TUNNEL_ENCAP_NONE: + return 0; + case TUNNEL_ENCAP_FOU: + return sizeof(struct udphdr); + default: + return -EINVAL; + } +} + +int ip_tunnel_encap_setup(struct ip_tunnel *t, + struct ip_tunnel_encap *ipencap) +{ + int hlen; + + memset(&t->encap, 0, sizeof(t->encap)); + + hlen = ip_encap_hlen(ipencap); + if (hlen < 0) + return hlen; + + t->encap.type = ipencap->type; + t->encap.sport = ipencap->sport; + t->encap.dport = ipencap->dport; + t->encap.flags = ipencap->flags; + + t->encap_hlen = hlen; + t->hlen = t->encap_hlen + t->tun_hlen; + + return 
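+	/* Worked example of the bookkeeping above: a GRE tunnel with
+	 * checksums over FOU has tun_hlen = 8 (4-byte base GRE header plus
+	 * 4-byte checksum field) and encap_hlen = sizeof(struct udphdr)
+	 * = 8, so t->hlen = 16; __gre_tunnel_init() then sets
+	 *
+	 *	t_hlen = 16 + sizeof(struct iphdr);
+	 *	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
+	 *	dev->mtu = ETH_DATA_LEN - t_hlen - 4;
+	 */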
0; +} +EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup); + +static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + size_t hdr_len, u8 *protocol, struct flowi4 *fl4) +{ + struct udphdr *uh; + __be16 sport; + bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM); + int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + + skb = iptunnel_handle_offloads(skb, csum, type); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + /* Get length and hash before making space in skb */ + + sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), + skb, 0, 0, false); + + skb_push(skb, hdr_len); + + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = e->dport; + uh->source = sport; + uh->len = htons(skb->len); + uh->check = 0; + udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb, + fl4->saddr, fl4->daddr, skb->len); + + *protocol = IPPROTO_UDP; + + return 0; +} + +int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, + u8 *protocol, struct flowi4 *fl4) +{ + switch (t->encap.type) { + case TUNNEL_ENCAP_NONE: + return 0; + case TUNNEL_ENCAP_FOU: + return fou_build_header(skb, &t->encap, t->encap_hlen, + protocol, fl4); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL(ip_tunnel_encap); + static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, struct rtable *rt, __be16 df) { @@ -536,7 +622,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, } void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, - const struct iphdr *tnl_params, const u8 protocol) + const struct iphdr *tnl_params, u8 protocol) { struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *inner_iph; @@ -617,6 +703,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr, tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link); + if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) + goto tx_error; + rt = connected ? 
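+	/* Resulting wire layout when t->encap.type == TUNNEL_ENCAP_FOU
+	 * (IPIP case shown):
+	 *
+	 *	[ outer IP, proto=UDP ][ UDP dport=e->dport ][ inner IP ][ data ]
+	 *
+	 * fou_build_header() rewrote *protocol to IPPROTO_UDP, and unless
+	 * a fixed source port was configured the sport is derived by
+	 * udp_flow_src_port() from a hash of the inner flow, preserving
+	 * ECMP/RSS spreading on the path.
+	 */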
tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL; if (!rt) { diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 5bbef4fdcb43..648fa1490ea7 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -262,7 +262,8 @@ static int __init ic_open_devs(void) /* wait for a carrier on at least one device */ start = jiffies; next_msg = start + msecs_to_jiffies(CONF_CARRIER_TIMEOUT/12); - while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) { + while (time_before(jiffies, start + + msecs_to_jiffies(CONF_CARRIER_TIMEOUT))) { int wait, elapsed; for_each_netdev(&init_net, dev) diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 62eaa005e146..bfec31df8b21 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -301,7 +301,8 @@ static int ipip_tunnel_init(struct net_device *dev) memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); - tunnel->hlen = 0; + tunnel->tun_hlen = 0; + tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen; tunnel->parms.iph.protocol = IPPROTO_IPIP; return ip_tunnel_init(dev); } @@ -340,10 +341,53 @@ static void ipip_netlink_parms(struct nlattr *data[], parms->iph.frag_off = htons(IP_DF); } +/* This function returns true when ENCAP attributes are present in the nl msg */ +static bool ipip_netlink_encap_parms(struct nlattr *data[], + struct ip_tunnel_encap *ipencap) +{ + bool ret = false; + + memset(ipencap, 0, sizeof(*ipencap)); + + if (!data) + return ret; + + if (data[IFLA_IPTUN_ENCAP_TYPE]) { + ret = true; + ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]); + } + + if (data[IFLA_IPTUN_ENCAP_FLAGS]) { + ret = true; + ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]); + } + + if (data[IFLA_IPTUN_ENCAP_SPORT]) { + ret = true; + ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]); + } + + if (data[IFLA_IPTUN_ENCAP_DPORT]) { + ret = true; + ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]); + } + + return ret; +} + static int ipip_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; + + if (ipip_netlink_encap_parms(data, &ipencap)) { + struct ip_tunnel *t = netdev_priv(dev); + int err = ip_tunnel_encap_setup(t, &ipencap); + + if (err < 0) + return err; + } ipip_netlink_parms(data, &p); return ip_tunnel_newlink(dev, tb, &p); @@ -353,6 +397,15 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; + + if (ipip_netlink_encap_parms(data, &ipencap)) { + struct ip_tunnel *t = netdev_priv(dev); + int err = ip_tunnel_encap_setup(t, &ipencap); + + if (err < 0) + return err; + } ipip_netlink_parms(data, &p); @@ -378,6 +431,14 @@ static size_t ipip_get_size(const struct net_device *dev) nla_total_size(1) + /* IFLA_IPTUN_PMTUDISC */ nla_total_size(1) + + /* IFLA_IPTUN_ENCAP_TYPE */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_FLAGS */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_SPORT */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_DPORT */ + nla_total_size(2) + 0; } @@ -394,6 +455,17 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, IFLA_IPTUN_PMTUDISC, !!(parm->iph.frag_off & htons(IP_DF)))) goto nla_put_failure; + + if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, + tunnel->encap.type) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT, + tunnel->encap.sport) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, + tunnel->encap.dport) || + nla_put_u16(skb, 
IFLA_IPTUN_ENCAP_FLAGS, + tunnel->encap.flags)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -407,6 +479,10 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = { [IFLA_IPTUN_TTL] = { .type = NLA_U8 }, [IFLA_IPTUN_TOS] = { .type = NLA_U8 }, [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 }, + [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, }; static struct rtnl_link_ops ipip_link_ops __read_mostly = { diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 7cbcaf4f0194..d189c5262bdb 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -232,8 +232,21 @@ config IP_NF_NAT if IP_NF_NAT +config NF_NAT_MASQUERADE_IPV4 + tristate "IPv4 masquerade support" + help + This is the kernel functionality to provide NAT in the masquerade + flavour (automatic source address selection). + +config NFT_MASQ_IPV4 + tristate "IPv4 masquerading support for nf_tables" + depends on NF_TABLES_IPV4 + depends on NFT_MASQ + select NF_NAT_MASQUERADE_IPV4 + config IP_NF_TARGET_MASQUERADE tristate "MASQUERADE target support" + select NF_NAT_MASQUERADE_IPV4 default m if NETFILTER_ADVANCED=n help Masquerading is a special case of NAT: all outgoing connections are diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index edf4af32e9f2..14488cc5fd2c 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_NF_LOG_IPV4) += nf_log_ipv4.o obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o +obj-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o # NAT protocols (nf_nat) obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o @@ -35,6 +36,7 @@ obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o +obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o # generic IP tables diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 2510c02c2d21..e90f83a3415b 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -285,7 +285,7 @@ clusterip_hashfn(const struct sk_buff *skb, } /* node numbers are 1..n, not 0..n */ - return (((u64)hashval * config->num_total_nodes) >> 32) + 1; + return reciprocal_scale(hashval, config->num_total_nodes) + 1; } static inline int diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 00352ce0f0de..da7f02a0b868 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c @@ -22,6 +22,7 @@ #include <linux/netfilter_ipv4.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_nat.h> +#include <net/netfilter/ipv4/nf_nat_masquerade.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); @@ -46,103 +47,17 @@ static int masquerade_tg_check(const struct xt_tgchk_param *par) static unsigned int masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par) { - struct nf_conn *ct; - struct nf_conn_nat *nat; - enum ip_conntrack_info ctinfo; - struct nf_nat_range newrange; + struct nf_nat_range range; const struct nf_nat_ipv4_multi_range_compat *mr; - const struct 
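+	/* reciprocal_scale(), substituted above for the open-coded
+	 * ((u64)hash * n) >> 32 pattern, maps a uniformly distributed
+	 * 32-bit value onto [0, ep_ro) without a division; its definition
+	 * is essentially:
+	 *
+	 *	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
+	 *	{
+	 *		return (u32)(((u64) val * ep_ro) >> 32);
+	 *	}
+	 */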
rtable *rt; - __be32 newsrc, nh; - - NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING); - - ct = nf_ct_get(skb, &ctinfo); - nat = nfct_nat(ct); - - NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || - ctinfo == IP_CT_RELATED_REPLY)); - - /* Source address is 0.0.0.0 - locally generated packet that is - * probably not supposed to be masqueraded. - */ - if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) - return NF_ACCEPT; mr = par->targinfo; - rt = skb_rtable(skb); - nh = rt_nexthop(rt, ip_hdr(skb)->daddr); - newsrc = inet_select_addr(par->out, nh, RT_SCOPE_UNIVERSE); - if (!newsrc) { - pr_info("%s ate my IP address\n", par->out->name); - return NF_DROP; - } - - nat->masq_index = par->out->ifindex; - - /* Transfer from original range. */ - memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); - memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); - newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS; - newrange.min_addr.ip = newsrc; - newrange.max_addr.ip = newsrc; - newrange.min_proto = mr->range[0].min; - newrange.max_proto = mr->range[0].max; + range.flags = mr->range[0].flags; + range.min_proto = mr->range[0].min; + range.max_proto = mr->range[0].max; - /* Hand modified range to generic setup. */ - return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); + return nf_nat_masquerade_ipv4(skb, par->hooknum, &range, par->out); } -static int -device_cmp(struct nf_conn *i, void *ifindex) -{ - const struct nf_conn_nat *nat = nfct_nat(i); - - if (!nat) - return 0; - if (nf_ct_l3num(i) != NFPROTO_IPV4) - return 0; - return nat->masq_index == (int)(long)ifindex; -} - -static int masq_device_event(struct notifier_block *this, - unsigned long event, - void *ptr) -{ - const struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct net *net = dev_net(dev); - - if (event == NETDEV_DOWN) { - /* Device was downed. Search entire table for - conntracks which were associated with that device, - and forget them. 
*/ - NF_CT_ASSERT(dev->ifindex != 0); - - nf_ct_iterate_cleanup(net, device_cmp, - (void *)(long)dev->ifindex, 0, 0); - } - - return NOTIFY_DONE; -} - -static int masq_inet_event(struct notifier_block *this, - unsigned long event, - void *ptr) -{ - struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; - struct netdev_notifier_info info; - - netdev_notifier_info_init(&info, dev); - return masq_device_event(this, event, &info); -} - -static struct notifier_block masq_dev_notifier = { - .notifier_call = masq_device_event, -}; - -static struct notifier_block masq_inet_notifier = { - .notifier_call = masq_inet_event, -}; - static struct xt_target masquerade_tg_reg __read_mostly = { .name = "MASQUERADE", .family = NFPROTO_IPV4, @@ -160,12 +75,8 @@ static int __init masquerade_tg_init(void) ret = xt_register_target(&masquerade_tg_reg); - if (ret == 0) { - /* Register for device down reports */ - register_netdevice_notifier(&masq_dev_notifier); - /* Register IP address change reports */ - register_inetaddr_notifier(&masq_inet_notifier); - } + if (ret == 0) + nf_nat_masquerade_ipv4_register_notifier(); return ret; } @@ -173,8 +84,7 @@ static int __init masquerade_tg_init(void) static void __exit masquerade_tg_exit(void) { xt_unregister_target(&masquerade_tg_reg); - unregister_netdevice_notifier(&masq_dev_notifier); - unregister_inetaddr_notifier(&masq_inet_notifier); + nf_nat_masquerade_ipv4_unregister_notifier(); } module_init(masquerade_tg_init); diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c index f1787c04a4dd..6b67d7e9a75d 100644 --- a/net/ipv4/netfilter/iptable_nat.c +++ b/net/ipv4/netfilter/iptable_nat.c @@ -28,222 +28,57 @@ static const struct xt_table nf_nat_ipv4_table = { .af = NFPROTO_IPV4, }; -static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) -{ - /* Force range to this IP; let proto decide mapping for - * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). - */ - struct nf_nat_range range; - - range.flags = 0; - pr_debug("Allocating NULL binding for %p (%pI4)\n", ct, - HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ? - &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip : - &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip); - - return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); -} - -static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum, - const struct net_device *in, - const struct net_device *out, - struct nf_conn *ct) +static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct) { struct net *net = nf_ct_net(ct); - unsigned int ret; - ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table); - if (ret == NF_ACCEPT) { - if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) - ret = alloc_null_binding(ct, hooknum); - } - return ret; + return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.nat_table); } -static unsigned int -nf_nat_ipv4_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - struct nf_conn_nat *nat; - /* maniptype == SRC for postrouting. 
*/ - enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); - - /* We never see fragments: conntrack defrags on pre-routing - * and local-out, and nf_nat_out protects post-routing. - */ - NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb))); - - ct = nf_ct_get(skb, &ctinfo); - /* Can't track? It's not due to stress, or conntrack would - * have dropped it. Hence it's the user's responsibilty to - * packet filter it out, or implement conntrack/NAT for that - * protocol. 8) --RR - */ - if (!ct) - return NF_ACCEPT; - - /* Don't try to NAT if this packet is not conntracked */ - if (nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; - - switch (ctinfo) { - case IP_CT_RELATED: - case IP_CT_RELATED_REPLY: - if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { - if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, - ops->hooknum)) - return NF_DROP; - else - return NF_ACCEPT; - } - /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ - case IP_CT_NEW: - /* Seen it before? This can happen for loopback, retrans, - * or local packets. - */ - if (!nf_nat_initialized(ct, maniptype)) { - unsigned int ret; - - ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct); - if (ret != NF_ACCEPT) - return ret; - } else { - pr_debug("Already setup manip %s for ct %p\n", - maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", - ct); - if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) - goto oif_changed; - } - break; - - default: - /* ESTABLISHED */ - NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || - ctinfo == IP_CT_ESTABLISHED_REPLY); - if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) - goto oif_changed; - } - - return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); - -oif_changed: - nf_ct_kill_acct(ct, ctinfo, skb); - return NF_DROP; + return nf_nat_ipv4_fn(ops, skb, in, out, iptable_nat_do_chain); } -static unsigned int -nf_nat_ipv4_in(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - unsigned int ret; - __be32 daddr = ip_hdr(skb)->daddr; - - ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - daddr != ip_hdr(skb)->daddr) - skb_dst_drop(skb); - - return ret; + return nf_nat_ipv4_in(ops, skb, in, out, iptable_nat_do_chain); } -static unsigned int -nf_nat_ipv4_out(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { -#ifdef CONFIG_XFRM - const struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - int err; -#endif - unsigned int ret; - - /* root is playing with raw sockets. 
*/ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) - return NF_ACCEPT; - - ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn); -#ifdef CONFIG_XFRM - if (ret != NF_DROP && ret != NF_STOLEN && - !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if ((ct->tuplehash[dir].tuple.src.u3.ip != - ct->tuplehash[!dir].tuple.dst.u3.ip) || - (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && - ct->tuplehash[dir].tuple.src.u.all != - ct->tuplehash[!dir].tuple.dst.u.all)) { - err = nf_xfrm_me_harder(skb, AF_INET); - if (err < 0) - ret = NF_DROP_ERR(err); - } - } -#endif - return ret; + return nf_nat_ipv4_out(ops, skb, in, out, iptable_nat_do_chain); } -static unsigned int -nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - const struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - unsigned int ret; - int err; - - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) - return NF_ACCEPT; - - ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (ct->tuplehash[dir].tuple.dst.u3.ip != - ct->tuplehash[!dir].tuple.src.u3.ip) { - err = ip_route_me_harder(skb, RTN_UNSPEC); - if (err < 0) - ret = NF_DROP_ERR(err); - } -#ifdef CONFIG_XFRM - else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && - ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && - ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[!dir].tuple.src.u.all) { - err = nf_xfrm_me_harder(skb, AF_INET); - if (err < 0) - ret = NF_DROP_ERR(err); - } -#endif - } - return ret; + return nf_nat_ipv4_local_fn(ops, skb, in, out, iptable_nat_do_chain); } static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { /* Before packet filtering, change destination */ { - .hook = nf_nat_ipv4_in, + .hook = iptable_nat_ipv4_in, .owner = THIS_MODULE, .pf = NFPROTO_IPV4, .hooknum = NF_INET_PRE_ROUTING, @@ -251,7 +86,7 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { }, /* After packet filtering, change source */ { - .hook = nf_nat_ipv4_out, + .hook = iptable_nat_ipv4_out, .owner = THIS_MODULE, .pf = NFPROTO_IPV4, .hooknum = NF_INET_POST_ROUTING, @@ -259,7 +94,7 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { }, /* Before packet filtering, change destination */ { - .hook = nf_nat_ipv4_local_fn, + .hook = iptable_nat_ipv4_local_fn, .owner = THIS_MODULE, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_OUT, @@ -267,7 +102,7 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { }, /* After packet filtering, change source */ { - .hook = nf_nat_ipv4_fn, + .hook = iptable_nat_ipv4_fn, .owner = THIS_MODULE, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_IN, diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index 14f5ccd06337..fc37711e11f3 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c @@ -254,6 +254,205 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, } 
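The +205-line hunk opened above moves the IPv4 NAT state machine into nf_nat_l3proto_ipv4.c and exports it with a do_chain callback, so iptable_nat and nft_chain_nat_ipv4 no longer carry duplicate copies of it; each backend only supplies the rule-evaluation step. A minimal sketch of a consumer, using only the signatures introduced in this patch (my_do_chain and my_nat_hook are illustrative names, not part of the series):

	/* Hypothetical backend reusing the shared IPv4 NAT core. */
	static unsigned int my_do_chain(const struct nf_hook_ops *ops,
					struct sk_buff *skb,
					const struct net_device *in,
					const struct net_device *out,
					struct nf_conn *ct)
	{
		/* Evaluate backend-specific NAT rules here. Returning
		 * NF_ACCEPT without setting up a mapping makes
		 * nf_nat_ipv4_fn() fall back to
		 * nf_nat_alloc_null_binding().
		 */
		return NF_ACCEPT;
	}

	static unsigned int my_nat_hook(const struct nf_hook_ops *ops,
					struct sk_buff *skb,
					const struct net_device *in,
					const struct net_device *out,
					int (*okfn)(struct sk_buff *))
	{
		return nf_nat_ipv4_fn(ops, skb, in, out, my_do_chain);
	}
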
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation); +unsigned int +nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + struct nf_conn_nat *nat; + /* maniptype == SRC for postrouting. */ + enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); + + /* We never see fragments: conntrack defrags on pre-routing + * and local-out, and nf_nat_out protects post-routing. + */ + NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb))); + + ct = nf_ct_get(skb, &ctinfo); + /* Can't track? It's not due to stress, or conntrack would + * have dropped it. Hence it's the user's responsibility to + * packet filter it out, or implement conntrack/NAT for that + * protocol. 8) --RR + */ + if (!ct) + return NF_ACCEPT; + + /* Don't try to NAT if this packet is not conntracked */ + if (nf_ct_is_untracked(ct)) + return NF_ACCEPT; + + nat = nf_ct_nat_ext_add(ct); + if (nat == NULL) + return NF_ACCEPT; + + switch (ctinfo) { + case IP_CT_RELATED: + case IP_CT_RELATED_REPLY: + if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { + if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, + ops->hooknum)) + return NF_DROP; + else + return NF_ACCEPT; + } + /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ + case IP_CT_NEW: + /* Seen it before? This can happen for loopback, retrans, + * or local packets. + */ + if (!nf_nat_initialized(ct, maniptype)) { + unsigned int ret; + + ret = do_chain(ops, skb, in, out, ct); + if (ret != NF_ACCEPT) + return ret; + + if (nf_nat_initialized(ct, HOOK2MANIP(ops->hooknum))) + break; + + ret = nf_nat_alloc_null_binding(ct, ops->hooknum); + if (ret != NF_ACCEPT) + return ret; + } else { + pr_debug("Already setup manip %s for ct %p\n", + maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", + ct); + if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) + goto oif_changed; + } + break; + + default: + /* ESTABLISHED */ + NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || + ctinfo == IP_CT_ESTABLISHED_REPLY); + if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) + goto oif_changed; + } + + return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); + +oif_changed: + nf_ct_kill_acct(ct, ctinfo, skb); + return NF_DROP; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn); + +unsigned int +nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + unsigned int ret; + __be32 daddr = ip_hdr(skb)->daddr; + + ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); + if (ret != NF_DROP && ret != NF_STOLEN && + daddr != ip_hdr(skb)->daddr) + skb_dst_drop(skb); + + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv4_in); + +unsigned int +nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ +#ifdef CONFIG_XFRM + const struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + int err; +#endif + unsigned int ret; + + /* root is playing with raw sockets. 
*/ + if (skb->len < sizeof(struct iphdr) || + ip_hdrlen(skb) < sizeof(struct iphdr)) + return NF_ACCEPT; + + ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); +#ifdef CONFIG_XFRM + if (ret != NF_DROP && ret != NF_STOLEN && + !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && + (ct = nf_ct_get(skb, &ctinfo)) != NULL) { + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + + if ((ct->tuplehash[dir].tuple.src.u3.ip != + ct->tuplehash[!dir].tuple.dst.u3.ip) || + (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && + ct->tuplehash[dir].tuple.src.u.all != + ct->tuplehash[!dir].tuple.dst.u.all)) { + err = nf_xfrm_me_harder(skb, AF_INET); + if (err < 0) + ret = NF_DROP_ERR(err); + } + } +#endif + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv4_out); + +unsigned int +nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + const struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + unsigned int ret; + int err; + + /* root is playing with raw sockets. */ + if (skb->len < sizeof(struct iphdr) || + ip_hdrlen(skb) < sizeof(struct iphdr)) + return NF_ACCEPT; + + ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain); + if (ret != NF_DROP && ret != NF_STOLEN && + (ct = nf_ct_get(skb, &ctinfo)) != NULL) { + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + + if (ct->tuplehash[dir].tuple.dst.u3.ip != + ct->tuplehash[!dir].tuple.src.u3.ip) { + err = ip_route_me_harder(skb, RTN_UNSPEC); + if (err < 0) + ret = NF_DROP_ERR(err); + } +#ifdef CONFIG_XFRM + else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && + ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && + ct->tuplehash[dir].tuple.dst.u.all != + ct->tuplehash[!dir].tuple.src.u.all) { + err = nf_xfrm_me_harder(skb, AF_INET); + if (err < 0) + ret = NF_DROP_ERR(err); + } +#endif + } + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv4_local_fn); + static int __init nf_nat_l3proto_ipv4_init(void) { int err; diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c new file mode 100644 index 000000000000..c6eb42100e9a --- /dev/null +++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c @@ -0,0 +1,153 @@ +/* (C) 1999-2001 Paul `Rusty' Russell + * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/atomic.h> +#include <linux/inetdevice.h> +#include <linux/ip.h> +#include <linux/timer.h> +#include <linux/netfilter.h> +#include <net/protocol.h> +#include <net/ip.h> +#include <net/checksum.h> +#include <net/route.h> +#include <linux/netfilter_ipv4.h> +#include <linux/netfilter/x_tables.h> +#include <net/netfilter/nf_nat.h> +#include <net/netfilter/ipv4/nf_nat_masquerade.h> + +unsigned int +nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, + const struct nf_nat_range *range, + const struct net_device *out) +{ + struct nf_conn *ct; + struct nf_conn_nat *nat; + enum ip_conntrack_info ctinfo; + struct nf_nat_range newrange; + const struct rtable *rt; + __be32 newsrc, nh; + + NF_CT_ASSERT(hooknum == NF_INET_POST_ROUTING); + + ct = nf_ct_get(skb, &ctinfo); + nat = nfct_nat(ct); + + NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || + ctinfo == IP_CT_RELATED_REPLY)); + + /* Source address is 0.0.0.0 - locally generated packet that is + * probably not supposed to be masqueraded. + */ + if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0) + return NF_ACCEPT; + + rt = skb_rtable(skb); + nh = rt_nexthop(rt, ip_hdr(skb)->daddr); + newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE); + if (!newsrc) { + pr_info("%s ate my IP address\n", out->name); + return NF_DROP; + } + + nat->masq_index = out->ifindex; + + /* Transfer from original range. */ + memset(&newrange.min_addr, 0, sizeof(newrange.min_addr)); + memset(&newrange.max_addr, 0, sizeof(newrange.max_addr)); + newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; + newrange.min_addr.ip = newsrc; + newrange.max_addr.ip = newsrc; + newrange.min_proto = range->min_proto; + newrange.max_proto = range->max_proto; + + /* Hand modified range to generic setup. */ + return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4); + +static int device_cmp(struct nf_conn *i, void *ifindex) +{ + const struct nf_conn_nat *nat = nfct_nat(i); + + if (!nat) + return 0; + if (nf_ct_l3num(i) != NFPROTO_IPV4) + return 0; + return nat->masq_index == (int)(long)ifindex; +} + +static int masq_device_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + const struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct net *net = dev_net(dev); + + if (event == NETDEV_DOWN) { + /* Device was downed. Search entire table for + * conntracks which were associated with that device, + * and forget them. 
+ */ + NF_CT_ASSERT(dev->ifindex != 0); + + nf_ct_iterate_cleanup(net, device_cmp, + (void *)(long)dev->ifindex, 0, 0); + } + + return NOTIFY_DONE; +} + +static int masq_inet_event(struct notifier_block *this, + unsigned long event, + void *ptr) +{ + struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev; + struct netdev_notifier_info info; + + netdev_notifier_info_init(&info, dev); + return masq_device_event(this, event, &info); +} + +static struct notifier_block masq_dev_notifier = { + .notifier_call = masq_device_event, +}; + +static struct notifier_block masq_inet_notifier = { + .notifier_call = masq_inet_event, +}; + +static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0); + +void nf_nat_masquerade_ipv4_register_notifier(void) +{ + /* check if the notifier was already set */ + if (atomic_inc_return(&masquerade_notifier_refcount) > 1) + return; + + /* Register for device down reports */ + register_netdevice_notifier(&masq_dev_notifier); + /* Register IP address change reports */ + register_inetaddr_notifier(&masq_inet_notifier); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier); + +void nf_nat_masquerade_ipv4_unregister_notifier(void) +{ + /* check if the notifier still has clients */ + if (atomic_dec_return(&masquerade_notifier_refcount) > 0) + return; + + unregister_netdevice_notifier(&masq_dev_notifier); + unregister_inetaddr_notifier(&masq_inet_notifier); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>"); diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c index 3964157d826c..df547bf50078 100644 --- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c +++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c @@ -26,136 +26,53 @@ #include <net/netfilter/nf_nat_l3proto.h> #include <net/ip.h> -/* - * NAT chains - */ - -static unsigned int nf_nat_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct) { - enum ip_conntrack_info ctinfo; - struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - struct nf_conn_nat *nat; - enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); struct nft_pktinfo pkt; - unsigned int ret; - - if (ct == NULL || nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))); - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; - - switch (ctinfo) { - case IP_CT_RELATED: - case IP_CT_RELATED + IP_CT_IS_REPLY: - if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { - if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, - ops->hooknum)) - return NF_DROP; - else - return NF_ACCEPT; - } - /* Fall through */ - case IP_CT_NEW: - if (nf_nat_initialized(ct, maniptype)) - break; - nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); + nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out); - ret = nft_do_chain(&pkt, ops); - if (ret != NF_ACCEPT) - return ret; - if (!nf_nat_initialized(ct, maniptype)) { - ret = nf_nat_alloc_null_binding(ct, ops->hooknum); - if (ret != NF_ACCEPT) - return ret; - } - default: - break; - } - - return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); + return nft_do_chain(&pkt, ops); } -static unsigned int nf_nat_prerouting(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const 
struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - __be32 daddr = ip_hdr(skb)->daddr; - unsigned int ret; - - ret = nf_nat_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - ip_hdr(skb)->daddr != daddr) { - skb_dst_drop(skb); - } - return ret; + return nf_nat_ipv4_fn(ops, skb, in, out, nft_nat_do_chain); } -static unsigned int nf_nat_postrouting(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - enum ip_conntrack_info ctinfo __maybe_unused; - const struct nf_conn *ct __maybe_unused; - unsigned int ret; - - ret = nf_nat_fn(ops, skb, in, out, okfn); -#ifdef CONFIG_XFRM - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (ct->tuplehash[dir].tuple.src.u3.ip != - ct->tuplehash[!dir].tuple.dst.u3.ip || - ct->tuplehash[dir].tuple.src.u.all != - ct->tuplehash[!dir].tuple.dst.u.all) - return nf_xfrm_me_harder(skb, AF_INET) == 0 ? - ret : NF_DROP; - } -#endif - return ret; + return nf_nat_ipv4_in(ops, skb, in, out, nft_nat_do_chain); } -static unsigned int nf_nat_output(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - enum ip_conntrack_info ctinfo; - const struct nf_conn *ct; - unsigned int ret; - - ret = nf_nat_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + return nf_nat_ipv4_out(ops, skb, in, out, nft_nat_do_chain); +} - if (ct->tuplehash[dir].tuple.dst.u3.ip != - ct->tuplehash[!dir].tuple.src.u3.ip) { - if (ip_route_me_harder(skb, RTN_UNSPEC)) - ret = NF_DROP; - } -#ifdef CONFIG_XFRM - else if (ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[!dir].tuple.src.u.all) - if (nf_xfrm_me_harder(skb, AF_INET)) - ret = NF_DROP; -#endif - } - return ret; +static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + return nf_nat_ipv4_local_fn(ops, skb, in, out, nft_nat_do_chain); } static const struct nf_chain_type nft_chain_nat_ipv4 = { @@ -168,10 +85,10 @@ static const struct nf_chain_type nft_chain_nat_ipv4 = { (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN), .hooks = { - [NF_INET_PRE_ROUTING] = nf_nat_prerouting, - [NF_INET_POST_ROUTING] = nf_nat_postrouting, - [NF_INET_LOCAL_OUT] = nf_nat_output, - [NF_INET_LOCAL_IN] = nf_nat_fn, + [NF_INET_PRE_ROUTING] = nft_nat_ipv4_in, + [NF_INET_POST_ROUTING] = nft_nat_ipv4_out, + [NF_INET_LOCAL_OUT] = nft_nat_ipv4_local_fn, + [NF_INET_LOCAL_IN] = nft_nat_ipv4_fn, }, }; diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c new file mode 100644 index 
000000000000..6ea1d207b6a5 --- /dev/null +++ b/net/ipv4/netfilter/nft_masq_ipv4.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netlink.h> +#include <linux/netfilter.h> +#include <linux/netfilter/nf_tables.h> +#include <net/netfilter/nf_tables.h> +#include <net/netfilter/nft_masq.h> +#include <net/netfilter/ipv4/nf_nat_masquerade.h> + +static void nft_masq_ipv4_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt) +{ + struct nft_masq *priv = nft_expr_priv(expr); + struct nf_nat_range range; + unsigned int verdict; + + range.flags = priv->flags; + + verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum, + &range, pkt->out); + + data[NFT_REG_VERDICT].verdict = verdict; +} + +static int nft_masq_ipv4_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + int err; + + err = nft_masq_init(ctx, expr, tb); + if (err < 0) + return err; + + nf_nat_masquerade_ipv4_register_notifier(); + return 0; +} + +static void nft_masq_ipv4_destroy(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + nf_nat_masquerade_ipv4_unregister_notifier(); +} + +static struct nft_expr_type nft_masq_ipv4_type; +static const struct nft_expr_ops nft_masq_ipv4_ops = { + .type = &nft_masq_ipv4_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)), + .eval = nft_masq_ipv4_eval, + .init = nft_masq_ipv4_init, + .destroy = nft_masq_ipv4_destroy, + .dump = nft_masq_dump, +}; + +static struct nft_expr_type nft_masq_ipv4_type __read_mostly = { + .family = NFPROTO_IPV4, + .name = "masq", + .ops = &nft_masq_ipv4_ops, + .policy = nft_masq_policy, + .maxattr = NFTA_MASQ_MAX, + .owner = THIS_MODULE, +}; + +static int __init nft_masq_ipv4_module_init(void) +{ + return nft_register_expr(&nft_masq_ipv4_type); +} + +static void __exit nft_masq_ipv4_module_exit(void) +{ + nft_unregister_expr(&nft_masq_ipv4_type); +} + +module_init(nft_masq_ipv4_module_init); +module_exit(nft_masq_ipv4_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); +MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "masq"); diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index a3c59a077a5f..57f7c9804139 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -311,7 +311,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) chk_addr_ret = RTN_LOCAL; - if ((sysctl_ip_nonlocal_bind == 0 && + if ((net->ipv4.sysctl_ip_nonlocal_bind == 0 && isk->freebind == 0 && isk->transparent == 0 && chk_addr_ret != RTN_LOCAL) || chk_addr_ret == RTN_MULTICAST || diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c index 46d6a1c923a8..4b7c0ec65251 100644 --- a/net/ipv4/protocol.c +++ b/net/ipv4/protocol.c @@ -30,6 +30,7 @@ const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly; +EXPORT_SYMBOL(inet_offloads); int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) { diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 173e7ea54c70..d4bd68dcdc39 100644 --- a/net/ipv4/route.c +++ 
b/net/ipv4/route.c @@ -596,12 +596,12 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) static inline u32 fnhe_hashfun(__be32 daddr) { + static u32 fnhe_hashrnd __read_mostly; u32 hval; - hval = (__force u32) daddr; - hval ^= (hval >> 11) ^ (hval >> 22); - - return hval & (FNHE_HASH_SIZE - 1); + net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); + hval = jhash_1word((__force u32) daddr, fnhe_hashrnd); + return hash_32(hval, FNHE_HASH_SHIFT); } static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) @@ -628,12 +628,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, spin_lock_bh(&fnhe_lock); - hash = nh->nh_exceptions; + hash = rcu_dereference(nh->nh_exceptions); if (!hash) { hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC); if (!hash) goto out_unlock; - nh->nh_exceptions = hash; + rcu_assign_pointer(nh->nh_exceptions, hash); } hash += hval; @@ -1242,7 +1242,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) { - struct fnhe_hash_bucket *hash = nh->nh_exceptions; + struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); struct fib_nh_exception *fnhe; u32 hval; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index c0c75688896e..0431a8f3c8f4 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -25,7 +25,7 @@ extern int sysctl_tcp_syncookies; -static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; +static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 79a007c52558..1599966f4639 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -286,13 +286,6 @@ static struct ctl_table ipv4_table[] = { .extra2 = &ip_ttl_max, }, { - .procname = "ip_nonlocal_bind", - .data = &sysctl_ip_nonlocal_bind, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { .procname = "tcp_syn_retries", .data = &sysctl_tcp_syn_retries, .maxlen = sizeof(int), @@ -450,6 +443,16 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, +#ifdef CONFIG_IP_MULTICAST + { + .procname = "igmp_qrv", + .data = &sysctl_igmp_qrv, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one + }, +#endif { .procname = "inet_peer_threshold", .data = &inet_peer_threshold, @@ -839,6 +842,13 @@ static struct ctl_table ipv4_net_table[] = { .proc_handler = proc_dointvec, }, { + .procname = "ip_nonlocal_bind", + .data = &init_net.ipv4.sysctl_ip_nonlocal_bind, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { .procname = "fwmark_reflect", .data = &init_net.ipv4.sysctl_fwmark_reflect, .maxlen = sizeof(int), diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 541f26a67ba2..070aeff1b131 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1510,9 +1510,9 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { offset = seq - TCP_SKB_CB(skb)->seq; - if (tcp_hdr(skb)->syn) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) offset--; - if (offset < skb->len || tcp_hdr(skb)->fin) { + if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { *off = offset; return skb; } @@ -1585,7 +1585,7 @@ int tcp_read_sock(struct 
sock *sk, read_descriptor_t *desc, if (offset + 1 != skb->len) continue; } - if (tcp_hdr(skb)->fin) { + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { sk_eat_skb(sk, skb, false); ++seq; break; @@ -1722,11 +1722,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, break; offset = *seq - TCP_SKB_CB(skb)->seq; - if (tcp_hdr(skb)->syn) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) offset--; if (offset < skb->len) goto found_ok_skb; - if (tcp_hdr(skb)->fin) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; WARN(!(flags & MSG_PEEK), "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", @@ -1959,7 +1959,7 @@ skip_copy: if (used + offset < skb->len) continue; - if (tcp_hdr(skb)->fin) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, copied_early); @@ -2160,8 +2160,10 @@ void tcp_close(struct sock *sk, long timeout) * reader process may not have drained the data yet! */ while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { - u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - - tcp_hdr(skb)->fin; + u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; + + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) + len--; data_was_unread += len; __kfree_skb(skb); } diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index d5de69bc04f5..bb395d46a389 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -17,7 +17,6 @@ #include <linux/module.h> #include <net/tcp.h> - #define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation * max_cwnd = snd_cwnd * beta */ @@ -46,11 +45,10 @@ MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold"); module_param(smooth_part, int, 0644); MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax"); - /* BIC TCP Parameters */ struct bictcp { u32 cnt; /* increase cwnd by 1 after ACKs */ - u32 last_max_cwnd; /* last maximum snd_cwnd */ + u32 last_max_cwnd; /* last maximum snd_cwnd */ u32 loss_cwnd; /* congestion window at last loss */ u32 last_cwnd; /* the last snd_cwnd */ u32 last_time; /* time when updated last_cwnd */ @@ -103,7 +101,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) /* binary increase */ if (cwnd < ca->last_max_cwnd) { - __u32 dist = (ca->last_max_cwnd - cwnd) + __u32 dist = (ca->last_max_cwnd - cwnd) / BICTCP_B; if (dist > max_increment) @@ -154,7 +152,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) bictcp_update(ca, tp->snd_cwnd); tcp_cong_avoid_ai(tp, ca->cnt); } - } /* @@ -177,7 +174,6 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk) ca->loss_cwnd = tp->snd_cwnd; - if (tp->snd_cwnd <= low_window) return max(tp->snd_cwnd >> 1U, 2U); else @@ -188,6 +184,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct bictcp *ca = inet_csk_ca(sk); + return max(tp->snd_cwnd, ca->loss_cwnd); } @@ -206,12 +203,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt) if (icsk->icsk_ca_state == TCP_CA_Open) { struct bictcp *ca = inet_csk_ca(sk); + cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT; ca->delayed_ack += cnt; } } - static struct tcp_congestion_ops bictcp __read_mostly = { .init = bictcp_init, .ssthresh = bictcp_recalc_ssthresh, diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 7b09d8b49fa5..80248f56c89f 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -142,7 +142,6 @@ static int __init tcp_congestion_default(void) } late_initcall(tcp_congestion_default); - /* 
Build string with list of available congestion control values */ void tcp_get_available_congestion_control(char *buf, size_t maxlen) { @@ -154,7 +153,6 @@ void tcp_get_available_congestion_control(char *buf, size_t maxlen) offs += snprintf(buf + offs, maxlen - offs, "%s%s", offs == 0 ? "" : " ", ca->name); - } rcu_read_unlock(); } @@ -186,7 +184,6 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen) offs += snprintf(buf + offs, maxlen - offs, "%s%s", offs == 0 ? "" : " ", ca->name); - } rcu_read_unlock(); } @@ -230,7 +227,6 @@ out: return ret; } - /* Change congestion control for socket */ int tcp_set_congestion_control(struct sock *sk, const char *name) { @@ -337,6 +333,7 @@ EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); u32 tcp_reno_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); + return max(tp->snd_cwnd >> 1U, 2U); } EXPORT_SYMBOL_GPL(tcp_reno_ssthresh); diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index a9bd8a4828a9..20de0118c98e 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -82,12 +82,13 @@ MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (mse /* BIC TCP Parameters */ struct bictcp { u32 cnt; /* increase cwnd by 1 after ACKs */ - u32 last_max_cwnd; /* last maximum snd_cwnd */ + u32 last_max_cwnd; /* last maximum snd_cwnd */ u32 loss_cwnd; /* congestion window at last loss */ u32 last_cwnd; /* the last snd_cwnd */ u32 last_time; /* time when updated last_cwnd */ u32 bic_origin_point;/* origin point of bic function */ - u32 bic_K; /* time to origin point from the beginning of the current epoch */ + u32 bic_K; /* time to origin point + from the beginning of the current epoch */ u32 delay_min; /* min delay (msec << 3) */ u32 epoch_start; /* beginning of an epoch */ u32 ack_cnt; /* number of acks */ @@ -219,7 +220,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) ca->last_time = tcp_time_stamp; if (ca->epoch_start == 0) { - ca->epoch_start = tcp_time_stamp; /* record the beginning of an epoch */ + ca->epoch_start = tcp_time_stamp; /* record beginning */ ca->ack_cnt = 1; /* start counting */ ca->tcp_cwnd = cwnd; /* syn with cubic */ @@ -263,9 +264,9 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) /* c/rtt * (t-K)^3 */ delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ); - if (t < ca->bic_K) /* below origin*/ + if (t < ca->bic_K) /* below origin*/ bic_target = ca->bic_origin_point - delta; - else /* above origin*/ + else /* above origin*/ bic_target = ca->bic_origin_point + delta; /* cubic function - calc bictcp_cnt*/ @@ -285,13 +286,14 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) /* TCP Friendly */ if (tcp_friendliness) { u32 scale = beta_scale; + delta = (cwnd * scale) >> 3; while (ca->ack_cnt > delta) { /* update tcp cwnd */ ca->ack_cnt -= delta; ca->tcp_cwnd++; } - if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */ + if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */ delta = ca->tcp_cwnd - cwnd; max_cnt = cwnd / delta; if (ca->cnt > max_cnt) @@ -320,7 +322,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) bictcp_update(ca, tp->snd_cwnd); tcp_cong_avoid_ai(tp, ca->cnt); } - } static u32 bictcp_recalc_ssthresh(struct sock *sk) @@ -452,7 +453,8 @@ static int __init cubictcp_register(void) * based on SRTT of 100ms */ - beta_scale = 8*(BICTCP_BETA_SCALE+beta)/ 3 / (BICTCP_BETA_SCALE - beta); + beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3 + / (BICTCP_BETA_SCALE - beta); cube_rtt_scale = (bic_scale * 10); 
/* 1024*c/rtt */ diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index ed3f2ad42e0f..0d73f9ddb55b 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -9,7 +9,6 @@ * 2 of the License, or (at your option) any later version. */ - #include <linux/module.h> #include <linux/inet_diag.h> @@ -35,13 +34,13 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, } static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct inet_diag_req_v2 *r, struct nlattr *bc) + struct inet_diag_req_v2 *r, struct nlattr *bc) { inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc); } static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, - struct inet_diag_req_v2 *req) + struct inet_diag_req_v2 *req) { return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req); } diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index 1c4908280d92..882c08aae2f5 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c @@ -9,7 +9,6 @@ #include <linux/module.h> #include <net/tcp.h> - /* From AIMD tables from RFC 3649 appendix B, * with fixed-point MD scaled <<8. */ @@ -17,78 +16,78 @@ static const struct hstcp_aimd_val { unsigned int cwnd; unsigned int md; } hstcp_aimd_vals[] = { - { 38, 128, /* 0.50 */ }, - { 118, 112, /* 0.44 */ }, - { 221, 104, /* 0.41 */ }, - { 347, 98, /* 0.38 */ }, - { 495, 93, /* 0.37 */ }, - { 663, 89, /* 0.35 */ }, - { 851, 86, /* 0.34 */ }, - { 1058, 83, /* 0.33 */ }, - { 1284, 81, /* 0.32 */ }, - { 1529, 78, /* 0.31 */ }, - { 1793, 76, /* 0.30 */ }, - { 2076, 74, /* 0.29 */ }, - { 2378, 72, /* 0.28 */ }, - { 2699, 71, /* 0.28 */ }, - { 3039, 69, /* 0.27 */ }, - { 3399, 68, /* 0.27 */ }, - { 3778, 66, /* 0.26 */ }, - { 4177, 65, /* 0.26 */ }, - { 4596, 64, /* 0.25 */ }, - { 5036, 62, /* 0.25 */ }, - { 5497, 61, /* 0.24 */ }, - { 5979, 60, /* 0.24 */ }, - { 6483, 59, /* 0.23 */ }, - { 7009, 58, /* 0.23 */ }, - { 7558, 57, /* 0.22 */ }, - { 8130, 56, /* 0.22 */ }, - { 8726, 55, /* 0.22 */ }, - { 9346, 54, /* 0.21 */ }, - { 9991, 53, /* 0.21 */ }, - { 10661, 52, /* 0.21 */ }, - { 11358, 52, /* 0.20 */ }, - { 12082, 51, /* 0.20 */ }, - { 12834, 50, /* 0.20 */ }, - { 13614, 49, /* 0.19 */ }, - { 14424, 48, /* 0.19 */ }, - { 15265, 48, /* 0.19 */ }, - { 16137, 47, /* 0.19 */ }, - { 17042, 46, /* 0.18 */ }, - { 17981, 45, /* 0.18 */ }, - { 18955, 45, /* 0.18 */ }, - { 19965, 44, /* 0.17 */ }, - { 21013, 43, /* 0.17 */ }, - { 22101, 43, /* 0.17 */ }, - { 23230, 42, /* 0.17 */ }, - { 24402, 41, /* 0.16 */ }, - { 25618, 41, /* 0.16 */ }, - { 26881, 40, /* 0.16 */ }, - { 28193, 39, /* 0.16 */ }, - { 29557, 39, /* 0.15 */ }, - { 30975, 38, /* 0.15 */ }, - { 32450, 38, /* 0.15 */ }, - { 33986, 37, /* 0.15 */ }, - { 35586, 36, /* 0.14 */ }, - { 37253, 36, /* 0.14 */ }, - { 38992, 35, /* 0.14 */ }, - { 40808, 35, /* 0.14 */ }, - { 42707, 34, /* 0.13 */ }, - { 44694, 33, /* 0.13 */ }, - { 46776, 33, /* 0.13 */ }, - { 48961, 32, /* 0.13 */ }, - { 51258, 32, /* 0.13 */ }, - { 53677, 31, /* 0.12 */ }, - { 56230, 30, /* 0.12 */ }, - { 58932, 30, /* 0.12 */ }, - { 61799, 29, /* 0.12 */ }, - { 64851, 28, /* 0.11 */ }, - { 68113, 28, /* 0.11 */ }, - { 71617, 27, /* 0.11 */ }, - { 75401, 26, /* 0.10 */ }, - { 79517, 26, /* 0.10 */ }, - { 84035, 25, /* 0.10 */ }, - { 89053, 24, /* 0.10 */ }, + { 38, 128, /* 0.50 */ }, + { 118, 112, /* 0.44 */ }, + { 221, 104, /* 0.41 */ }, + { 347, 98, /* 0.38 */ }, + { 495, 93, /* 0.37 */ }, + { 663, 89, /* 0.35 */ }, + { 851, 86, /* 0.34 */ }, + { 1058, 83, /* 0.33 */ }, + { 1284, 
81, /* 0.32 */ }, + { 1529, 78, /* 0.31 */ }, + { 1793, 76, /* 0.30 */ }, + { 2076, 74, /* 0.29 */ }, + { 2378, 72, /* 0.28 */ }, + { 2699, 71, /* 0.28 */ }, + { 3039, 69, /* 0.27 */ }, + { 3399, 68, /* 0.27 */ }, + { 3778, 66, /* 0.26 */ }, + { 4177, 65, /* 0.26 */ }, + { 4596, 64, /* 0.25 */ }, + { 5036, 62, /* 0.25 */ }, + { 5497, 61, /* 0.24 */ }, + { 5979, 60, /* 0.24 */ }, + { 6483, 59, /* 0.23 */ }, + { 7009, 58, /* 0.23 */ }, + { 7558, 57, /* 0.22 */ }, + { 8130, 56, /* 0.22 */ }, + { 8726, 55, /* 0.22 */ }, + { 9346, 54, /* 0.21 */ }, + { 9991, 53, /* 0.21 */ }, + { 10661, 52, /* 0.21 */ }, + { 11358, 52, /* 0.20 */ }, + { 12082, 51, /* 0.20 */ }, + { 12834, 50, /* 0.20 */ }, + { 13614, 49, /* 0.19 */ }, + { 14424, 48, /* 0.19 */ }, + { 15265, 48, /* 0.19 */ }, + { 16137, 47, /* 0.19 */ }, + { 17042, 46, /* 0.18 */ }, + { 17981, 45, /* 0.18 */ }, + { 18955, 45, /* 0.18 */ }, + { 19965, 44, /* 0.17 */ }, + { 21013, 43, /* 0.17 */ }, + { 22101, 43, /* 0.17 */ }, + { 23230, 42, /* 0.17 */ }, + { 24402, 41, /* 0.16 */ }, + { 25618, 41, /* 0.16 */ }, + { 26881, 40, /* 0.16 */ }, + { 28193, 39, /* 0.16 */ }, + { 29557, 39, /* 0.15 */ }, + { 30975, 38, /* 0.15 */ }, + { 32450, 38, /* 0.15 */ }, + { 33986, 37, /* 0.15 */ }, + { 35586, 36, /* 0.14 */ }, + { 37253, 36, /* 0.14 */ }, + { 38992, 35, /* 0.14 */ }, + { 40808, 35, /* 0.14 */ }, + { 42707, 34, /* 0.13 */ }, + { 44694, 33, /* 0.13 */ }, + { 46776, 33, /* 0.13 */ }, + { 48961, 32, /* 0.13 */ }, + { 51258, 32, /* 0.13 */ }, + { 53677, 31, /* 0.12 */ }, + { 56230, 30, /* 0.12 */ }, + { 58932, 30, /* 0.12 */ }, + { 61799, 29, /* 0.12 */ }, + { 64851, 28, /* 0.11 */ }, + { 68113, 28, /* 0.11 */ }, + { 71617, 27, /* 0.11 */ }, + { 75401, 26, /* 0.10 */ }, + { 79517, 26, /* 0.10 */ }, + { 84035, 25, /* 0.10 */ }, + { 89053, 24, /* 0.10 */ }, }; #define HSTCP_AIMD_MAX ARRAY_SIZE(hstcp_aimd_vals) diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 031361311a8b..58469fff6c18 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -98,7 +98,8 @@ static inline void measure_rtt(struct sock *sk, u32 srtt) } } -static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt) +static void measure_achieved_throughput(struct sock *sk, + u32 pkts_acked, s32 rtt) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_sock *tp = tcp_sk(sk); @@ -148,8 +149,8 @@ static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT) if (use_bandwidth_switch) { u32 maxB = ca->maxB; u32 old_maxB = ca->old_maxB; - ca->old_maxB = ca->maxB; + ca->old_maxB = ca->maxB; if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) { ca->beta = BETA_MIN; ca->modeswitch = 0; @@ -270,6 +271,7 @@ static void htcp_state(struct sock *sk, u8 new_state) case TCP_CA_Open: { struct htcp *ca = inet_csk_ca(sk); + if (ca->undo_last_cong) { ca->last_cong = jiffies; ca->undo_last_cong = 0; diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index d8f8f05a4951..f963b274f2b0 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c @@ -29,7 +29,6 @@ static int rtt0 = 25; module_param(rtt0, int, 0644); MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)"); - /* This is called to refresh values for hybla parameters */ static inline void hybla_recalc_param (struct sock *sk) { diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 5999b3972e64..1d5a30a90adf 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -284,7 +284,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 
acked) delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT; if (delta >= tp->snd_cwnd) { tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd, - (u32) tp->snd_cwnd_clamp); + (u32)tp->snd_cwnd_clamp); tp->snd_cwnd_cnt = 0; } } @@ -299,7 +299,6 @@ static u32 tcp_illinois_ssthresh(struct sock *sk) return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); } - /* Extract info for Tcp socket info provided via netlink. */ static void tcp_illinois_info(struct sock *sk, u32 ext, struct sk_buff *skb) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a906e0200ff2..13f3da4762e3 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1888,21 +1888,21 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp) tp->sacked_out = 0; } -static void tcp_clear_retrans_partial(struct tcp_sock *tp) +void tcp_clear_retrans(struct tcp_sock *tp) { tp->retrans_out = 0; tp->lost_out = 0; - tp->undo_marker = 0; tp->undo_retrans = -1; + tp->fackets_out = 0; + tp->sacked_out = 0; } -void tcp_clear_retrans(struct tcp_sock *tp) +static inline void tcp_init_undo(struct tcp_sock *tp) { - tcp_clear_retrans_partial(tp); - - tp->fackets_out = 0; - tp->sacked_out = 0; + tp->undo_marker = tp->snd_una; + /* Retransmission still in flight may cause DSACKs later. */ + tp->undo_retrans = tp->retrans_out ? : -1; } /* Enter Loss state. If we detect SACK reneging, forget all SACK information @@ -1925,18 +1925,18 @@ void tcp_enter_loss(struct sock *sk) tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); + tcp_init_undo(tp); } tp->snd_cwnd = 1; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; - tcp_clear_retrans_partial(tp); + tp->retrans_out = 0; + tp->lost_out = 0; if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); - tp->undo_marker = tp->snd_una; - skb = tcp_write_queue_head(sk); is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); if (is_reneg) { @@ -1950,9 +1950,6 @@ void tcp_enter_loss(struct sock *sk) if (skb == tcp_send_head(sk)) break; - if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) - tp->undo_marker = 0; - TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; @@ -2671,8 +2668,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) NET_INC_STATS_BH(sock_net(sk), mib_idx); tp->prior_ssthresh = 0; - tp->undo_marker = tp->snd_una; - tp->undo_retrans = tp->retrans_out ? : -1; + tcp_init_undo(tp); if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { if (!ece_ack) @@ -2971,7 +2967,8 @@ void tcp_rearm_rto(struct sock *sk) if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); - const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; + const u32 rto_time_stamp = + tcp_skb_timestamp(skb) + rto; s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); /* delta may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. @@ -3211,9 +3208,10 @@ static void tcp_ack_probe(struct sock *sk) * This function is not for random using! 
*/ } else { + unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); + inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, - min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), - TCP_RTO_MAX); + when, TCP_RTO_MAX); } } @@ -4096,7 +4094,7 @@ static void tcp_ofo_queue(struct sock *sk) __skb_unlink(skb, &tp->out_of_order_queue); __skb_queue_tail(&sk->sk_receive_queue, skb); tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; - if (tcp_hdr(skb)->fin) + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); } } @@ -4146,9 +4144,6 @@ static bool tcp_try_coalesce(struct sock *sk, *fragstolen = false; - if (tcp_hdr(from)->fin) - return false; - /* Its possible this segment overlaps with prior segment in queue */ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) return false; @@ -4161,6 +4156,7 @@ static bool tcp_try_coalesce(struct sock *sk, NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE); TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; + TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags; return true; } @@ -4309,24 +4305,19 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) { - struct sk_buff *skb = NULL; - struct tcphdr *th; + struct sk_buff *skb; bool fragstolen; if (size == 0) return 0; - skb = alloc_skb(size + sizeof(*th), sk->sk_allocation); + skb = alloc_skb(size, sk->sk_allocation); if (!skb) goto err; - if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th))) + if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) goto err_free; - th = (struct tcphdr *)skb_put(skb, sizeof(*th)); - skb_reset_transport_header(skb); - memset(th, 0, sizeof(*th)); - if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size)) goto err_free; @@ -4334,7 +4325,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size; TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1; - if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) { + if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) { WARN_ON_ONCE(fragstolen); /* should not happen */ __kfree_skb(skb); } @@ -4516,7 +4507,7 @@ restart: * - bloated or contains data before "start" or * overlaps to the next one. */ - if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && + if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) && (tcp_win_from_space(skb->truesize) > skb->len || before(TCP_SKB_CB(skb)->seq, start))) { end_of_skbs = false; @@ -4535,30 +4526,18 @@ restart: /* Decided to skip this, advance start seq. */ start = TCP_SKB_CB(skb)->end_seq; } - if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) + if (end_of_skbs || + (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) return; while (before(start, end)) { + int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start); struct sk_buff *nskb; - unsigned int header = skb_headroom(skb); - int copy = SKB_MAX_ORDER(header, 0); - /* Too big header? This can happen with IPv6. 
*/ - if (copy < 0) - return; - if (end - start < copy) - copy = end - start; - nskb = alloc_skb(copy + header, GFP_ATOMIC); + nskb = alloc_skb(copy, GFP_ATOMIC); if (!nskb) return; - skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); - skb_set_network_header(nskb, (skb_network_header(skb) - - skb->head)); - skb_set_transport_header(nskb, (skb_transport_header(skb) - - skb->head)); - skb_reserve(nskb, header); - memcpy(nskb->head, skb->head, header); memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; __skb_queue_before(list, skb, nskb); @@ -4582,8 +4561,7 @@ restart: skb = tcp_collapse_one(sk, skb, list); if (!skb || skb == tail || - tcp_hdr(skb)->syn || - tcp_hdr(skb)->fin) + (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) return; } } @@ -5910,7 +5888,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, struct request_sock *req; struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = NULL; - __u32 isn = TCP_SKB_CB(skb)->when; + __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn; bool want_cookie = false, fastopen; struct flowi fl; struct tcp_fastopen_cookie foc = { .len = -1 }; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index cd17f009aede..3b2e49cb2b61 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -90,7 +90,6 @@ int sysctl_tcp_tw_reuse __read_mostly; int sysctl_tcp_low_latency __read_mostly; EXPORT_SYMBOL(sysctl_tcp_low_latency); - #ifdef CONFIG_TCP_MD5SIG static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th); @@ -431,15 +430,16 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) break; icsk->icsk_backoff--; - inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) : - TCP_TIMEOUT_INIT) << icsk->icsk_backoff; - tcp_bound_rto(sk); + icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : + TCP_TIMEOUT_INIT; + icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); skb = tcp_write_queue_head(sk); BUG_ON(!skb); - remaining = icsk->icsk_rto - min(icsk->icsk_rto, - tcp_time_stamp - TCP_SKB_CB(skb)->when); + remaining = icsk->icsk_rto - + min(icsk->icsk_rto, + tcp_time_stamp - tcp_skb_timestamp(skb)); if (remaining) { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, @@ -1269,7 +1269,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = { .send_ack = tcp_v4_reqsk_send_ack, .destructor = tcp_v4_reqsk_destructor, .send_reset = tcp_v4_send_reset, - .syn_ack_timeout = tcp_syn_ack_timeout, + .syn_ack_timeout = tcp_syn_ack_timeout, }; static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { @@ -1559,7 +1559,17 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) skb_queue_len(&tp->ucopy.prequeue) == 0) return false; - skb_dst_force(skb); + /* Before escaping RCU protected region, we need to take care of skb + * dst. Prequeue is only enabled for established sockets. + * For such sockets, we might need the skb dst only to set sk->sk_rx_dst + * Instead of doing full sk_rx_dst validity here, let's perform + * an optimistic check. 
+ */ + if (likely(sk->sk_rx_dst)) + skb_dst_drop(skb); + else + skb_dst_force(skb); + __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; if (tp->ucopy.memory > sk->sk_rcvbuf) { @@ -1628,7 +1638,8 @@ int tcp_v4_rcv(struct sk_buff *skb) TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); - TCP_SKB_CB(skb)->when = 0; + TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); + TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; @@ -1765,9 +1776,11 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - dst_hold(dst); - sk->sk_rx_dst = dst; - inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + if (dst) { + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + } } EXPORT_SYMBOL(inet_sk_rx_dst_set); @@ -2183,7 +2196,7 @@ int tcp_seq_open(struct inode *inode, struct file *file) s = ((struct seq_file *)file->private_data)->private; s->family = afinfo->family; - s->last_pos = 0; + s->last_pos = 0; return 0; } EXPORT_SYMBOL(tcp_seq_open); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 1649988bd1b6..a058f411d3a6 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -232,7 +232,7 @@ kill: u32 isn = tcptw->tw_snd_nxt + 65535 + 2; if (isn == 0) isn++; - TCP_SKB_CB(skb)->when = isn; + TCP_SKB_CB(skb)->tcp_tw_isn = isn; return TCP_TW_SYN; } diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index bc1b83cb8309..72912533a191 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -288,35 +288,14 @@ static int tcp_v4_gso_send_check(struct sk_buff *skb) static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) { - /* Use the IP hdr immediately proceeding for this transport */ - const struct iphdr *iph = skb_gro_network_header(skb); - __wsum wsum; - /* Don't bother verifying checksum if we're going to flush anyway. 
*/ - if (NAPI_GRO_CB(skb)->flush) - goto skip_csum; - - wsum = NAPI_GRO_CB(skb)->csum; - - switch (skb->ip_summed) { - case CHECKSUM_NONE: - wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), - 0); - - /* fall through */ - - case CHECKSUM_COMPLETE: - if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr, - wsum)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } - + if (!NAPI_GRO_CB(skb)->flush && + skb_gro_checksum_validate(skb, IPPROTO_TCP, + inet_gro_compute_pseudo)) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } -skip_csum: return tcp_gro_receive(head, skb); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5a7c41fbc6d3..8c61a7c0c889 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -550,7 +550,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, if (likely(sysctl_tcp_timestamps && *md5 == NULL)) { opts->options |= OPTION_TS; - opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset; + opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset; opts->tsecr = tp->rx_opt.ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } @@ -618,7 +618,7 @@ static unsigned int tcp_synack_options(struct sock *sk, } if (likely(ireq->tstamp_ok)) { opts->options |= OPTION_TS; - opts->tsval = TCP_SKB_CB(skb)->when; + opts->tsval = tcp_skb_timestamp(skb); opts->tsecr = req->ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } @@ -647,7 +647,6 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { - struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; struct tcp_sock *tp = tcp_sk(sk); unsigned int size = 0; unsigned int eff_sacks; @@ -666,7 +665,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb if (likely(tp->rx_opt.tstamp_ok)) { opts->options |= OPTION_TS; - opts->tsval = tcb ? tcb->when + tp->tsoffset : 0; + opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0; opts->tsecr = tp->rx_opt.ts_recent; size += TCPOLEN_TSTAMP_ALIGNED; } @@ -886,8 +885,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, skb = skb_clone(skb, gfp_mask); if (unlikely(!skb)) return -ENOBUFS; - /* Our usage of tstamp should remain private */ - skb->tstamp.tv64 = 0; } inet = inet_sk(sk); @@ -975,7 +972,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); + /* Our usage of tstamp should remain private */ + skb->tstamp.tv64 = 0; err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); + if (likely(err <= 0)) return err; @@ -1146,10 +1146,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, buff->ip_summed = skb->ip_summed; - /* Looks stupid, but our code really uses when of - * skbs, which it never sent before. --ANK - */ - TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; buff->tstamp = skb->tstamp; tcp_fragment_tstamp(skb, buff); @@ -1874,8 +1870,8 @@ static int tcp_mtu_probe(struct sock *sk) tcp_init_tso_segs(sk, nskb, nskb->len); /* We're ready to send. If this fails, the probe will - * be resegmented into mss-sized pieces by tcp_write_xmit(). */ - TCP_SKB_CB(nskb)->when = tcp_time_stamp; + * be resegmented into mss-sized pieces by tcp_write_xmit(). + */ if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { /* Decrement cwnd here because we are sending * effectively two packets. 
*/ @@ -1935,8 +1931,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, BUG_ON(!tso_segs); if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { - /* "when" is used as a start point for the retransmit timer */ - TCP_SKB_CB(skb)->when = tcp_time_stamp; + /* "skb_mstamp" is used as a start point for the retransmit timer */ + skb_mstamp_get(&skb->skb_mstamp); goto repair; /* Skip network transmission */ } @@ -2000,8 +1996,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) break; - TCP_SKB_CB(skb)->when = tcp_time_stamp; - if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) break; @@ -2499,7 +2493,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) /* Make a copy, if the first transmission SKB clone we made * is still in somebody's hands, else make a clone. */ - TCP_SKB_CB(skb)->when = tcp_time_stamp; /* make sure skb->data is aligned on arches that require it * and check if ack-trimming & collapsing extended the headroom @@ -2544,7 +2537,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) /* Save stamp of the first retransmit. */ if (!tp->retrans_stamp) - tp->retrans_stamp = TCP_SKB_CB(skb)->when; + tp->retrans_stamp = tcp_skb_timestamp(skb); /* snd_nxt is stored to detect loss of retransmitted segment, * see tcp_input.c tcp_sacktag_write_queue(). @@ -2752,7 +2745,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), TCPHDR_ACK | TCPHDR_RST); /* Send it off. */ - TCP_SKB_CB(skb)->when = tcp_time_stamp; if (tcp_transmit_skb(sk, skb, 0, priority)) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); @@ -2791,7 +2783,6 @@ int tcp_send_synack(struct sock *sk) TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; TCP_ECN_send_synack(tcp_sk(sk), skb); } - TCP_SKB_CB(skb)->when = tcp_time_stamp; return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); } @@ -2835,10 +2826,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, memset(&opts, 0, sizeof(opts)); #ifdef CONFIG_SYN_COOKIES if (unlikely(req->cookie_ts)) - TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); + skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req); else #endif - TCP_SKB_CB(skb)->when = tcp_time_stamp; + skb_mstamp_get(&skb->skb_mstamp); tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, foc) + sizeof(*th); @@ -3086,7 +3077,7 @@ int tcp_connect(struct sock *sk) skb_reserve(buff, MAX_TCP_HEADER); tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); - tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp; + tp->retrans_stamp = tcp_time_stamp; tcp_connect_queue_skb(sk, buff); TCP_ECN_send_syn(sk, buff); @@ -3194,7 +3185,7 @@ void tcp_send_ack(struct sock *sk) tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); /* Send it off, this clears delayed acks for us. */ - TCP_SKB_CB(buff)->when = tcp_time_stamp; + skb_mstamp_get(&buff->skb_mstamp); tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC)); } @@ -3226,7 +3217,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) * send it. 
*/ tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); - TCP_SKB_CB(skb)->when = tcp_time_stamp; + skb_mstamp_get(&skb->skb_mstamp); return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); } @@ -3270,7 +3261,6 @@ int tcp_write_wakeup(struct sock *sk) tcp_set_skb_tso_segs(sk, skb, mss); TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; - TCP_SKB_CB(skb)->when = tcp_time_stamp; err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); if (!err) tcp_event_new_data_sent(sk, skb); @@ -3289,6 +3279,7 @@ void tcp_send_probe0(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); + unsigned long probe_max; int err; err = tcp_write_wakeup(sk); @@ -3304,9 +3295,7 @@ void tcp_send_probe0(struct sock *sk) if (icsk->icsk_backoff < sysctl_tcp_retries2) icsk->icsk_backoff++; icsk->icsk_probes_out++; - inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, - min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), - TCP_RTO_MAX); + probe_max = TCP_RTO_MAX; } else { /* If packet was not sent due to local congestion, * do not backoff and do not remember icsk_probes_out. @@ -3316,11 +3305,11 @@ void tcp_send_probe0(struct sock *sk) */ if (!icsk->icsk_probes_out) icsk->icsk_probes_out = 1; - inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, - min(icsk->icsk_rto << icsk->icsk_backoff, - TCP_RESOURCE_PROBE_INTERVAL), - TCP_RTO_MAX); + probe_max = TCP_RESOURCE_PROBE_INTERVAL; } + inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, + inet_csk_rto_backoff(icsk, probe_max), + TCP_RTO_MAX); } int tcp_rtx_synack(struct sock *sk, struct request_sock *req) diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 3b66610d4156..ebf5ff57526e 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -83,7 +83,6 @@ static struct { struct tcp_log *log; } tcp_probe; - static inline int tcp_probe_used(void) { return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1); @@ -101,7 +100,6 @@ static inline int tcp_probe_avail(void) si4.sin_addr.s_addr = inet->inet_##mem##addr; \ } while (0) \ - /* * Hook inserted to be called before each receive packet. * Note: arguments must match tcp_rcv_established()! 
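For reference, both branches of tcp_send_probe0() above now feed a single timer arm through inet_csk_rto_backoff(), which clamps the exponentially backed-off RTO against a caller-chosen ceiling; tcp_probe_timer() further down uses the same helper for its orphan-liveness test. A minimal sketch, assuming the include/net/inet_connection_sock.h definition merged alongside these hunks:

static inline unsigned long
inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
		     unsigned long max_when)
{
	/* Widen to u64 before shifting so icsk_rto << icsk_backoff
	 * cannot overflow for large backoff counts.
	 */
	u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}

With the clamp centralized, tcp_send_probe0() only has to choose probe_max (TCP_RTO_MAX versus TCP_RESOURCE_PROBE_INTERVAL) and make one inet_csk_reset_xmit_timer() call instead of duplicating the min() logic in each branch.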
@@ -194,8 +192,8 @@ static int tcpprobe_sprint(char *tbuf, int n) return scnprintf(tbuf, n, "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n", - (unsigned long) tv.tv_sec, - (unsigned long) tv.tv_nsec, + (unsigned long)tv.tv_sec, + (unsigned long)tv.tv_nsec, &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una, p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd); } diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c index 8250949b8853..6824afb65d93 100644 --- a/net/ipv4/tcp_scalable.c +++ b/net/ipv4/tcp_scalable.c @@ -31,10 +31,10 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) static u32 tcp_scalable_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); + return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); } - static struct tcp_congestion_ops tcp_scalable __read_mostly = { .ssthresh = tcp_scalable_ssthresh, .cong_avoid = tcp_scalable_cong_avoid, diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index df90cd1ce37f..b24360f6e293 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -135,10 +135,9 @@ static bool retransmits_timed_out(struct sock *sk, if (!inet_csk(sk)->icsk_retransmits) return false; - if (unlikely(!tcp_sk(sk)->retrans_stamp)) - start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when; - else - start_ts = tcp_sk(sk)->retrans_stamp; + start_ts = tcp_sk(sk)->retrans_stamp; + if (unlikely(!start_ts)) + start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk)); if (likely(timeout == 0)) { linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base); @@ -181,7 +180,7 @@ static int tcp_write_timeout(struct sock *sk) retry_until = sysctl_tcp_retries2; if (sock_flag(sk, SOCK_DEAD)) { - const int alive = (icsk->icsk_rto < TCP_RTO_MAX); + const int alive = icsk->icsk_rto < TCP_RTO_MAX; retry_until = tcp_orphan_retries(sk, alive); do_reset = alive || @@ -295,7 +294,7 @@ static void tcp_probe_timer(struct sock *sk) max_probes = sysctl_tcp_retries2; if (sock_flag(sk, SOCK_DEAD)) { - const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); + const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX; max_probes = tcp_orphan_retries(sk, alive); diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index b40ad897f945..a6afde666ab1 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -51,7 +51,6 @@ MODULE_PARM_DESC(beta, "upper bound of packets in network"); module_param(gamma, int, 0644); MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)"); - /* There are several situations when we must "re-start" Vegas: * * o when a connection is established @@ -133,7 +132,6 @@ EXPORT_SYMBOL_GPL(tcp_vegas_pkts_acked); void tcp_vegas_state(struct sock *sk, u8 ca_state) { - if (ca_state == TCP_CA_Open) vegas_enable(sk); else @@ -285,7 +283,6 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) /* Use normal slow start */ else if (tp->snd_cwnd <= tp->snd_ssthresh) tcp_slow_start(tp, acked); - } /* Extract info for Tcp socket info provided via netlink. 
*/ diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index 8276977d2c85..a4d2d2d88dca 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -175,7 +175,6 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked) } else tp->snd_cwnd_cnt++; } - } if (tp->snd_cwnd < 2) tp->snd_cwnd = 2; diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index b94a04ae2ed5..81911a92356c 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c @@ -42,7 +42,6 @@ struct westwood { u8 reset_rtt_min; /* Reset RTT min to next RTT sample*/ }; - /* TCP Westwood functions and constants */ #define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */ #define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */ @@ -153,7 +152,6 @@ static inline void update_rtt_min(struct westwood *w) w->rtt_min = min(w->rtt, w->rtt_min); } - /* * @westwood_fast_bw * It is called when we are in fast path. In particular it is called when @@ -208,7 +206,6 @@ static inline u32 westwood_acked_count(struct sock *sk) return w->cumul_ack; } - /* * TCP Westwood * Here limit is evaluated as Bw estimation*RTTmin (for obtaining it @@ -219,6 +216,7 @@ static u32 tcp_westwood_bw_rttmin(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct westwood *w = inet_csk_ca(sk); + return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2); } @@ -254,12 +252,12 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) } } - /* Extract info for Tcp socket info provided via netlink. */ static void tcp_westwood_info(struct sock *sk, u32 ext, struct sk_buff *skb) { const struct westwood *ca = inet_csk_ca(sk); + if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { struct tcpvegas_info info = { .tcpv_enabled = 1, @@ -271,7 +269,6 @@ static void tcp_westwood_info(struct sock *sk, u32 ext, } } - static struct tcp_congestion_ops tcp_westwood __read_mostly = { .init = tcp_westwood_init, .ssthresh = tcp_reno_ssthresh, diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index 599b79b8eac0..cd7273218598 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -54,10 +54,8 @@ static void tcp_yeah_init(struct sock *sk) /* Ensure the MD arithmetic works. This is somewhat pedantic, * since I don't think we will see a cwnd this large. :) */ tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); - } - static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us) { const struct inet_connection_sock *icsk = inet_csk(sk); @@ -84,7 +82,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) /* Scalable */ tp->snd_cwnd_cnt += yeah->pkts_acked; - if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){ + if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) { if (tp->snd_cwnd < tp->snd_cwnd_clamp) tp->snd_cwnd++; tp->snd_cwnd_cnt = 0; @@ -120,7 +118,6 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) */ if (after(ack, yeah->vegas.beg_snd_nxt)) { - /* We do the Vegas calculations only if we got enough RTT * samples that we can be reasonably sure that we got * at least one RTT sample that wasn't from a delayed ACK. 
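The recurring substitution in the TCP hunks above — TCP_SKB_CB(skb)->when giving way to tcp_skb_timestamp(skb) on the read side and skb_mstamp_get(&skb->skb_mstamp) on the write side — moves the transmit timestamp out of the TCP control block and into a dual-resolution stamp carried by the skb itself. A sketch of the supporting pieces, assuming the definitions this series introduces in skbuff.h and tcp.h:

struct skb_mstamp {
	union {
		u64 v64;
		struct {
			u32 stamp_us;      /* usec clock, for RTT sampling */
			u32 stamp_jiffies; /* jiffies snapshot, the old "when" */
		};
	};
};

static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();

	do_div(val, NSEC_PER_USEC);
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}

/* Jiffies-based readers (retrans_stamp, retransmits_timed_out(),
 * SYN cookie timestamps) keep their old units via this accessor.
 */
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return skb->skb_mstamp.stamp_jiffies;
}

This is also why tcp_make_synack() can write cookie_init_timestamp() straight into skb->skb_mstamp.stamp_jiffies, and why the "usage of tstamp should remain private" reset in tcp_transmit_skb() could move from the clone site down to just before queue_xmit().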
@@ -189,7 +186,6 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) } yeah->lastQ = queue; - } /* Save the extent of the current window so we can use this @@ -205,7 +201,8 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) } } -static u32 tcp_yeah_ssthresh(struct sock *sk) { +static u32 tcp_yeah_ssthresh(struct sock *sk) +{ const struct tcp_sock *tp = tcp_sk(sk); struct yeah *yeah = inet_csk_ca(sk); u32 reduction; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index f57c0e4c2326..cd0db5471bb5 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -99,6 +99,7 @@ #include <linux/slab.h> #include <net/tcp_states.h> #include <linux/skbuff.h> +#include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/net_namespace.h> @@ -224,7 +225,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, remaining = (high - low) + 1; rand = prandom_u32(); - first = (((u64)rand * remaining) >> 32) + low; + first = reciprocal_scale(rand, remaining) + low; /* * force rand to be an odd multiple of UDP_HTABLE_SIZE */ @@ -448,7 +449,7 @@ begin: } } else if (score == badness && reuseport) { matches++; - if (((u64)hash * matches) >> 32 == 0) + if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } @@ -529,7 +530,7 @@ begin: } } else if (score == badness && reuseport) { matches++; - if (((u64)hash * matches) >> 32 == 0) + if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } @@ -1787,6 +1788,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (sk != NULL) { int ret; + if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + inet_compute_pseudo); + ret = udp_queue_rcv_skb(sk, skb); sock_put(sk); @@ -1967,7 +1972,7 @@ void udp_v4_early_demux(struct sk_buff *skb) return; skb->sk = sk; - skb->destructor = sock_edemux; + skb->destructor = sock_efree; dst = sk->sk_rx_dst; if (dst) diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 59035bc3008d..d7c43f764c71 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -228,30 +228,24 @@ unlock: } EXPORT_SYMBOL(udp_del_offload); -static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb) +struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb, + struct udphdr *uh) { struct udp_offload_priv *uo_priv; struct sk_buff *p, **pp = NULL; - struct udphdr *uh, *uh2; - unsigned int hlen, off; + struct udphdr *uh2; + unsigned int off = skb_gro_offset(skb); int flush = 1; if (NAPI_GRO_CB(skb)->udp_mark || - (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE)) + (skb->ip_summed != CHECKSUM_PARTIAL && + NAPI_GRO_CB(skb)->csum_cnt == 0 && + !NAPI_GRO_CB(skb)->csum_valid)) goto out; /* mark that this skb passed once through the udp gro layer */ NAPI_GRO_CB(skb)->udp_mark = 1; - off = skb_gro_offset(skb); - hlen = off + sizeof(*uh); - uh = skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, hlen)) { - uh = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!uh)) - goto out; - } - rcu_read_lock(); uo_priv = rcu_dereference(udp_offload_base); for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) { @@ -269,7 +263,12 @@ unflush: continue; uh2 = (struct udphdr *)(p->data + off); - if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) { + + /* Match ports and either checksums are either both zero + * or nonzero. 
+ */ + if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) || + (!uh->check ^ !uh2->check)) { NAPI_GRO_CB(p)->same_flow = 0; continue; } @@ -277,6 +276,7 @@ unflush: skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr)); + NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; pp = uo_priv->offload->callbacks.gro_receive(head, skb); out_unlock: @@ -286,7 +286,33 @@ out: return pp; } -static int udp_gro_complete(struct sk_buff *skb, int nhoff) +static struct sk_buff **udp4_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + struct udphdr *uh = udp_gro_udphdr(skb); + + if (unlikely(!uh)) + goto flush; + + /* Don't bother verifying checksum if we're going to flush anyway. */ + if (NAPI_GRO_CB(skb)->flush) + goto skip; + + if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check, + inet_gro_compute_pseudo)) + goto flush; + else if (uh->check) + skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + inet_gro_compute_pseudo); +skip: + return udp_gro_receive(head, skb, uh); + +flush: + NAPI_GRO_CB(skb)->flush = 1; + return NULL; +} + +int udp_gro_complete(struct sk_buff *skb, int nhoff) { struct udp_offload_priv *uo_priv; __be16 newlen = htons(skb->len - nhoff); @@ -304,19 +330,33 @@ static int udp_gro_complete(struct sk_buff *skb, int nhoff) break; } - if (uo_priv != NULL) + if (uo_priv != NULL) { + NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto; err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr)); + } rcu_read_unlock(); return err; } +static int udp4_gro_complete(struct sk_buff *skb, int nhoff) +{ + const struct iphdr *iph = ip_hdr(skb); + struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); + + if (uh->check) + uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr, + iph->daddr, 0); + + return udp_gro_complete(skb, nhoff); +} + static const struct net_offload udpv4_offload = { .callbacks = { .gso_send_check = udp4_ufo_send_check, .gso_segment = udp4_ufo_fragment, - .gro_receive = udp_gro_receive, - .gro_complete = udp_gro_complete, + .gro_receive = udp4_gro_receive, + .gro_complete = udp4_gro_complete, }, }; diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c index 61ec1a65207e..1671263e5fa0 100644 --- a/net/ipv4/udp_tunnel.c +++ b/net/ipv4/udp_tunnel.c @@ -8,83 +8,40 @@ #include <net/udp_tunnel.h> #include <net/net_namespace.h> -int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, - struct socket **sockp) +int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) { - int err = -EINVAL; + int err; struct socket *sock = NULL; + struct sockaddr_in udp_addr; -#if IS_ENABLED(CONFIG_IPV6) - if (cfg->family == AF_INET6) { - struct sockaddr_in6 udp6_addr; + err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock); + if (err < 0) + goto error; - err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock); - if (err < 0) - goto error; - - sk_change_net(sock->sk, net); - - udp6_addr.sin6_family = AF_INET6; - memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6, - sizeof(udp6_addr.sin6_addr)); - udp6_addr.sin6_port = cfg->local_udp_port; - err = kernel_bind(sock, (struct sockaddr *)&udp6_addr, - sizeof(udp6_addr)); - if (err < 0) - goto error; - - if (cfg->peer_udp_port) { - udp6_addr.sin6_family = AF_INET6; - memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6, - sizeof(udp6_addr.sin6_addr)); - udp6_addr.sin6_port = cfg->peer_udp_port; - err = kernel_connect(sock, - (struct sockaddr *)&udp6_addr, - sizeof(udp6_addr), 0); - } - if (err < 
0) - goto error; + sk_change_net(sock->sk, net); - udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums); - udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums); - } else -#endif - if (cfg->family == AF_INET) { - struct sockaddr_in udp_addr; - - err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock); - if (err < 0) - goto error; - - sk_change_net(sock->sk, net); + udp_addr.sin_family = AF_INET; + udp_addr.sin_addr = cfg->local_ip; + udp_addr.sin_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr)); + if (err < 0) + goto error; + if (cfg->peer_udp_port) { udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->local_ip; - udp_addr.sin_port = cfg->local_udp_port; - err = kernel_bind(sock, (struct sockaddr *)&udp_addr, - sizeof(udp_addr)); + udp_addr.sin_addr = cfg->peer_ip; + udp_addr.sin_port = cfg->peer_udp_port; + err = kernel_connect(sock, (struct sockaddr *)&udp_addr, + sizeof(udp_addr), 0); if (err < 0) goto error; - - if (cfg->peer_udp_port) { - udp_addr.sin_family = AF_INET; - udp_addr.sin_addr = cfg->peer_ip; - udp_addr.sin_port = cfg->peer_udp_port; - err = kernel_connect(sock, - (struct sockaddr *)&udp_addr, - sizeof(udp_addr), 0); - if (err < 0) - goto error; - } - - sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; - } else { - return -EPFNOSUPPORT; } + sock->sk->sk_no_check_tx = !cfg->use_udp_checksums; *sockp = sock; - return 0; error: @@ -95,6 +52,57 @@ error: *sockp = NULL; return err; } -EXPORT_SYMBOL(udp_sock_create); +EXPORT_SYMBOL(udp_sock_create4); + +void setup_udp_tunnel_sock(struct net *net, struct socket *sock, + struct udp_tunnel_sock_cfg *cfg) +{ + struct sock *sk = sock->sk; + + /* Disable multicast loopback */ + inet_sk(sk)->mc_loop = 0; + + /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */ + udp_set_convert_csum(sk, true); + + rcu_assign_sk_user_data(sk, cfg->sk_user_data); + + udp_sk(sk)->encap_type = cfg->encap_type; + udp_sk(sk)->encap_rcv = cfg->encap_rcv; + udp_sk(sk)->encap_destroy = cfg->encap_destroy; + + udp_tunnel_encap_enable(sock); +} +EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); + +int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt, + struct sk_buff *skb, __be32 src, __be32 dst, + __u8 tos, __u8 ttl, __be16 df, __be16 src_port, + __be16 dst_port, bool xnet) +{ + struct udphdr *uh; + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + uh->len = htons(skb->len); + + udp_set_csum(sock->sk->sk_no_check_tx, skb, src, dst, skb->len); + + return iptunnel_xmit(sock->sk, rt, skb, src, dst, IPPROTO_UDP, + tos, ttl, df, xnet); +} +EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb); + +void udp_tunnel_sock_release(struct socket *sock) +{ + rcu_assign_sk_user_data(sock->sk, NULL); + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); +} +EXPORT_SYMBOL_GPL(udp_tunnel_sock_release); MODULE_LICENSE("GPL"); diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 2fe68364bb20..2e8c06108ab9 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -45,3 +45,7 @@ obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o + +ifneq ($(CONFIG_IPV6),) +obj-$(CONFIG_NET_UDP_TUNNEL) += ip6_udp_tunnel.o +endif diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3342ee64f2e3..e189480f8fd6 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -180,7 
+180,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = { .rtr_solicits = MAX_RTR_SOLICITATIONS, .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL, .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY, - .use_tempaddr = 0, + .use_tempaddr = 0, .temp_valid_lft = TEMP_VALID_LIFETIME, .temp_prefered_lft = TEMP_PREFERRED_LIFETIME, .regen_max_retry = REGEN_MAX_RETRY, @@ -1105,8 +1105,8 @@ retry: spin_unlock_bh(&ifp->lock); regen_advance = idev->cnf.regen_max_retry * - idev->cnf.dad_transmits * - NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ; + idev->cnf.dad_transmits * + NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ; write_unlock_bh(&idev->lock); /* A temporary address is created only if this calculated Preferred @@ -1725,7 +1725,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); if (ipv6_addr_any(&addr)) return; - ipv6_dev_ac_inc(ifp->idev->dev, &addr); + __ipv6_dev_ac_inc(ifp->idev, &addr); } /* caller must hold RTNL */ @@ -2844,6 +2844,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, if (dev->flags & IFF_SLAVE) break; + if (idev && idev->cnf.disable_ipv6) + break; + if (event == NETDEV_UP) { if (!addrconf_qdisc_ok(dev)) { /* device is not ready yet. */ @@ -3030,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) struct hlist_head *h = &inet6_addr_lst[i]; spin_lock_bh(&addrconf_hash_lock); - restart: +restart: hlist_for_each_entry_rcu(ifa, h, addr_lst) { if (ifa->idev == idev) { hlist_del_init_rcu(&ifa->addr_lst); @@ -3544,8 +3547,8 @@ static void __net_exit if6_proc_net_exit(struct net *net) } static struct pernet_operations if6_proc_net_ops = { - .init = if6_proc_net_init, - .exit = if6_proc_net_exit, + .init = if6_proc_net_init, + .exit = if6_proc_net_exit, }; int __init if6_proc_init(void) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 2daa3a133e49..e4865a3ebe1d 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -7,15 +7,15 @@ * * Adapted from linux/net/ipv4/af_inet.c * - * Fixes: + * Fixes: * piggy, Karl Knutson : Socket protocol table - * Hideaki YOSHIFUJI : sin6_scope_id support - * Arnaldo Melo : check proc_net_create return, cleanups + * Hideaki YOSHIFUJI : sin6_scope_id support + * Arnaldo Melo : check proc_net_create return, cleanups * * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
*/ #define pr_fmt(fmt) "IPv6: " fmt @@ -302,7 +302,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) /* Reproduce AF_INET checks to make the bindings consistent */ v4addr = addr->sin6_addr.s6_addr32[3]; chk_addr_ret = inet_addr_type(net, v4addr); - if (!sysctl_ip_nonlocal_bind && + if (!net->ipv4.sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && v4addr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 72a4930bdc0a..fcffd4e522c8 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -17,10 +17,10 @@ * Authors * * Mitsuru KANDA @USAGI : IPv6 Support - * Kazunori MIYAZAWA @USAGI : - * Kunihiro Ishiguro <kunihiro@ipinfusion.com> + * Kazunori MIYAZAWA @USAGI : + * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * - * This file is derived from net/ipv4/ah.c. + * This file is derived from net/ipv4/ah.c. */ #define pr_fmt(fmt) "IPv6: " fmt @@ -284,7 +284,7 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir) ipv6_rearrange_rthdr(iph, exthdr.rth); break; - default : + default: return 0; } @@ -478,7 +478,7 @@ static void ah6_input_done(struct crypto_async_request *base, int err) auth_data = ah_tmp_auth(work_iph, hdr_len); icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len); - err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; + err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0; if (err) goto out; @@ -622,7 +622,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) goto out_free; } - err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0; + err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0; if (err) goto out_free; @@ -647,8 +647,8 @@ static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); - struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; - struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset); + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset); struct xfrm_state *x; if (type != ICMPV6_PKT_TOOBIG && @@ -755,11 +755,10 @@ static int ah6_rcv_cb(struct sk_buff *skb, int err) return 0; } -static const struct xfrm_type ah6_type = -{ +static const struct xfrm_type ah6_type = { .description = "AH6", .owner = THIS_MODULE, - .proto = IPPROTO_AH, + .proto = IPPROTO_AH, .flags = XFRM_TYPE_REPLAY_PROT, .init_state = ah6_init_state, .destructor = ah6_destroy, diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 9a386842fd62..f5e319a8d4e2 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -46,10 +46,6 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr); -/* Big ac list lock for all the sockets */ -static DEFINE_SPINLOCK(ipv6_sk_ac_lock); - - /* * socket join an anycast group */ @@ -78,7 +74,6 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) pac->acl_addr = *addr; rtnl_lock(); - rcu_read_lock(); if (ifindex == 0) { struct rt6_info *rt; @@ -91,11 +86,11 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) goto error; } else { /* router, no matching interface: just pick one */ - dev = dev_get_by_flags_rcu(net, IFF_UP, - IFF_UP | IFF_LOOPBACK); + dev = __dev_get_by_flags(net, IFF_UP, + IFF_UP | IFF_LOOPBACK); } } else - dev = dev_get_by_index_rcu(net, ifindex); + dev = __dev_get_by_index(net, ifindex); if (dev == NULL) { err = -ENODEV; @@ 
-127,17 +122,14 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) goto error; } - err = ipv6_dev_ac_inc(dev, addr); + err = __ipv6_dev_ac_inc(idev, addr); if (!err) { - spin_lock_bh(&ipv6_sk_ac_lock); pac->acl_next = np->ipv6_ac_list; np->ipv6_ac_list = pac; - spin_unlock_bh(&ipv6_sk_ac_lock); pac = NULL; } error: - rcu_read_unlock(); rtnl_unlock(); if (pac) sock_kfree_s(sk, pac, sizeof(*pac)); @@ -154,7 +146,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) struct ipv6_ac_socklist *pac, *prev_pac; struct net *net = sock_net(sk); - spin_lock_bh(&ipv6_sk_ac_lock); + rtnl_lock(); prev_pac = NULL; for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) { if ((ifindex == 0 || pac->acl_ifindex == ifindex) && @@ -163,7 +155,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) prev_pac = pac; } if (!pac) { - spin_unlock_bh(&ipv6_sk_ac_lock); + rtnl_unlock(); return -ENOENT; } if (prev_pac) @@ -171,14 +163,9 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) else np->ipv6_ac_list = pac->acl_next; - spin_unlock_bh(&ipv6_sk_ac_lock); - - rtnl_lock(); - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, pac->acl_ifindex); + dev = __dev_get_by_index(net, pac->acl_ifindex); if (dev) ipv6_dev_ac_dec(dev, &pac->acl_addr); - rcu_read_unlock(); rtnl_unlock(); sock_kfree_s(sk, pac, sizeof(*pac)); @@ -196,19 +183,16 @@ void ipv6_sock_ac_close(struct sock *sk) if (!np->ipv6_ac_list) return; - spin_lock_bh(&ipv6_sk_ac_lock); + rtnl_lock(); pac = np->ipv6_ac_list; np->ipv6_ac_list = NULL; - spin_unlock_bh(&ipv6_sk_ac_lock); prev_index = 0; - rtnl_lock(); - rcu_read_lock(); while (pac) { struct ipv6_ac_socklist *next = pac->acl_next; if (pac->acl_ifindex != prev_index) { - dev = dev_get_by_index_rcu(net, pac->acl_ifindex); + dev = __dev_get_by_index(net, pac->acl_ifindex); prev_index = pac->acl_ifindex; } if (dev) @@ -216,10 +200,14 @@ void ipv6_sock_ac_close(struct sock *sk) sock_kfree_s(sk, pac, sizeof(*pac)); pac = next; } - rcu_read_unlock(); rtnl_unlock(); } +static void aca_get(struct ifacaddr6 *aca) +{ + atomic_inc(&aca->aca_refcnt); +} + static void aca_put(struct ifacaddr6 *ac) { if (atomic_dec_and_test(&ac->aca_refcnt)) { @@ -229,23 +217,40 @@ static void aca_put(struct ifacaddr6 *ac) } } +static struct ifacaddr6 *aca_alloc(struct rt6_info *rt, + const struct in6_addr *addr) +{ + struct inet6_dev *idev = rt->rt6i_idev; + struct ifacaddr6 *aca; + + aca = kzalloc(sizeof(*aca), GFP_ATOMIC); + if (aca == NULL) + return NULL; + + aca->aca_addr = *addr; + in6_dev_hold(idev); + aca->aca_idev = idev; + aca->aca_rt = rt; + aca->aca_users = 1; + /* aca_tstamp should be updated upon changes */ + aca->aca_cstamp = aca->aca_tstamp = jiffies; + atomic_set(&aca->aca_refcnt, 1); + spin_lock_init(&aca->aca_lock); + + return aca; +} + /* * device anycast group inc (add if not found) */ -int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) +int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr) { struct ifacaddr6 *aca; - struct inet6_dev *idev; struct rt6_info *rt; int err; ASSERT_RTNL(); - idev = in6_dev_get(dev); - - if (idev == NULL) - return -EINVAL; - write_lock_bh(&idev->lock); if (idev->dead) { err = -ENODEV; @@ -260,46 +265,35 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) } } - /* - * not found: create a new one. 
- */ - - aca = kzalloc(sizeof(struct ifacaddr6), GFP_ATOMIC); - - if (aca == NULL) { - err = -ENOMEM; - goto out; - } - rt = addrconf_dst_alloc(idev, addr, true); if (IS_ERR(rt)) { - kfree(aca); err = PTR_ERR(rt); goto out; } - - aca->aca_addr = *addr; - aca->aca_idev = idev; - aca->aca_rt = rt; - aca->aca_users = 1; - /* aca_tstamp should be updated upon changes */ - aca->aca_cstamp = aca->aca_tstamp = jiffies; - atomic_set(&aca->aca_refcnt, 2); - spin_lock_init(&aca->aca_lock); + aca = aca_alloc(rt, addr); + if (aca == NULL) { + ip6_rt_put(rt); + err = -ENOMEM; + goto out; + } aca->aca_next = idev->ac_list; idev->ac_list = aca; + + /* Hold this for addrconf_join_solict() below before we unlock, + * it is already exposed via idev->ac_list. + */ + aca_get(aca); write_unlock_bh(&idev->lock); ip6_ins_rt(rt); - addrconf_join_solict(dev, &aca->aca_addr); + addrconf_join_solict(idev->dev, &aca->aca_addr); aca_put(aca); return 0; out: write_unlock_bh(&idev->lock); - in6_dev_put(idev); return err; } @@ -341,7 +335,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr) return 0; } -/* called with rcu_read_lock() */ +/* called with rtnl_lock() */ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr) { struct inet6_dev *idev = __in6_dev_get(dev); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 2753319524f1..2cdc38338be3 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -43,13 +43,13 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a) int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; - struct inet_sock *inet = inet_sk(sk); - struct ipv6_pinfo *np = inet6_sk(sk); - struct in6_addr *daddr, *final_p, final; + struct inet_sock *inet = inet_sk(sk); + struct ipv6_pinfo *np = inet6_sk(sk); + struct in6_addr *daddr, *final_p, final; struct dst_entry *dst; struct flowi6 fl6; struct ip6_flowlabel *flowlabel = NULL; - struct ipv6_txoptions *opt; + struct ipv6_txoptions *opt; int addr_type; int err; @@ -332,7 +332,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); struct sock_exterr_skb *serr; - struct sk_buff *skb, *skb2; + struct sk_buff *skb; DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name); struct { struct sock_extended_err ee; @@ -342,7 +342,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) int copied; err = -EAGAIN; - skb = skb_dequeue(&sk->sk_error_queue); + skb = sock_dequeue_err_skb(sk); if (skb == NULL) goto out; @@ -415,17 +415,6 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_ERRQUEUE; err = copied; - /* Reset and regenerate socket error */ - spin_lock_bh(&sk->sk_error_queue.lock); - sk->sk_err = 0; - if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { - sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; - spin_unlock_bh(&sk->sk_error_queue.lock); - sk->sk_error_report(sk); - } else { - spin_unlock_bh(&sk->sk_error_queue.lock); - } - out_free_skb: kfree_skb(skb); out: diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index d15da1377149..83fc3a385a26 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -17,10 +17,10 @@ * Authors * * Mitsuru KANDA @USAGI : IPv6 Support - * Kazunori MIYAZAWA @USAGI : - * Kunihiro Ishiguro <kunihiro@ipinfusion.com> + * Kazunori MIYAZAWA @USAGI : + * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * - * This file is derived from 
net/ipv4/esp.c + * This file is derived from net/ipv4/esp.c */ #define pr_fmt(fmt) "IPv6: " fmt @@ -598,7 +598,7 @@ static int esp6_init_state(struct xfrm_state *x) case XFRM_MODE_BEET: if (x->sel.family != AF_INET6) x->props.header_len += IPV4_BEET_PHMAXLEN + - (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); + (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); break; case XFRM_MODE_TRANSPORT: break; @@ -621,11 +621,10 @@ static int esp6_rcv_cb(struct sk_buff *skb, int err) return 0; } -static const struct xfrm_type esp6_type = -{ +static const struct xfrm_type esp6_type = { .description = "ESP6", - .owner = THIS_MODULE, - .proto = IPPROTO_ESP, + .owner = THIS_MODULE, + .proto = IPPROTO_ESP, .flags = XFRM_TYPE_REPLAY_PROT, .init_state = esp6_init_state, .destructor = esp6_destroy, diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 8d67900aa003..bfde361b6134 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -142,7 +142,7 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb) default: /* Other TLV code so scan list */ if (optlen > len) goto bad; - for (curr=procs; curr->type >= 0; curr++) { + for (curr = procs; curr->type >= 0; curr++) { if (curr->type == nh[off]) { /* type specific length/alignment checks will be performed in the diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 06ba3e58320b..394bb824fe4b 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -503,7 +503,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) msg.type = type; len = skb->len - msg.offset; - len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr)); + len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr)); if (len < 0) { LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n"); goto out_dst_release; @@ -636,7 +636,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) /* now skip over extension headers */ inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); - if (inner_offset<0) + if (inner_offset < 0) goto out; } else { inner_offset = sizeof(struct ipv6hdr); @@ -808,7 +808,7 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, memset(fl6, 0, sizeof(*fl6)); fl6->saddr = *saddr; fl6->daddr = *daddr; - fl6->flowi6_proto = IPPROTO_ICMPV6; + fl6->flowi6_proto = IPPROTO_ICMPV6; fl6->fl6_icmp_type = type; fl6->fl6_icmp_code = 0; fl6->flowi6_oif = oif; @@ -875,8 +875,8 @@ static void __net_exit icmpv6_sk_exit(struct net *net) } static struct pernet_operations icmpv6_sk_ops = { - .init = icmpv6_sk_init, - .exit = icmpv6_sk_exit, + .init = icmpv6_sk_init, + .exit = icmpv6_sk_exit, }; int __init icmpv6_init(void) diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index a245e5ddffbd..29b32206e494 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -63,7 +63,6 @@ int inet6_csk_bind_conflict(const struct sock *sk, return sk2 != NULL; } - EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); struct dst_entry *inet6_csk_route_req(struct sock *sk, @@ -144,7 +143,6 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk, return NULL; } - EXPORT_SYMBOL_GPL(inet6_csk_search_req); void inet6_csk_reqsk_queue_hash_add(struct sock *sk, @@ -160,10 +158,9 @@ void inet6_csk_reqsk_queue_hash_add(struct sock *sk, reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout); inet_csk_reqsk_queue_added(sk, timeout); } - EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add); 
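A pattern worth calling out in this stretch: the UDP port-selection and reuseport hunks above, and the inet6_lookup_listener() hunk just below, all replace the open-coded ((u64)hash * matches) >> 32 with reciprocal_scale(). A sketch, assuming the include/linux/kernel.h helper this series relies on:

/* Map a u32 val into [0, ep_ro) without a division:
 * take the high 32 bits of the 64-bit product.
 */
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
	return (u32)(((u64)val * ep_ro) >> 32);
}

For example, reciprocal_scale(rand, remaining) + low in udp_lib_get_port() picks a port in [low, low + remaining) from a random 32-bit value, trading the modulo for a multiply-high at the cost of the slight non-uniformity inherent to the trick.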
-void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) +void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr; @@ -175,7 +172,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, sk->sk_bound_dev_if); } - EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); static inline diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 262e13c02ec2..051dffb49c90 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -6,7 +6,7 @@ * Generic INET6 transport hashtables * * Authors: Lotsa people, from code originally in tcp, generalised here - * by Arnaldo Carvalho de Melo <acme@mandriva.com> + * by Arnaldo Carvalho de Melo <acme@mandriva.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -198,7 +198,7 @@ begin: } } else if (score == hiscore && reuseport) { matches++; - if (((u64)phash * matches) >> 32 == 0) + if (reciprocal_scale(phash, matches) == 0) result = sk; phash = next_pseudo_random32(phash); } @@ -222,7 +222,6 @@ begin: rcu_read_unlock(); return result; } - EXPORT_SYMBOL_GPL(inet6_lookup_listener); struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, @@ -238,7 +237,6 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, return sk; } - EXPORT_SYMBOL_GPL(inet6_lookup); static int __inet6_check_established(struct inet_timewait_death_row *death_row, @@ -324,5 +322,4 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk), __inet6_check_established, __inet6_hash); } - EXPORT_SYMBOL_GPL(inet6_hash_connect); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 4052694c6f2c..3dd7d4ebd7cd 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -136,7 +136,7 @@ static void ip6_fl_gc(unsigned long dummy) spin_lock(&ip6_fl_lock); - for (i=0; i<=FL_HASH_MASK; i++) { + for (i = 0; i <= FL_HASH_MASK; i++) { struct ip6_flowlabel *fl; struct ip6_flowlabel __rcu **flp; @@ -239,7 +239,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net, /* Socket flowlabel lists */ -struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label) +struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label) { struct ipv6_fl_socklist *sfl; struct ipv6_pinfo *np = inet6_sk(sk); @@ -259,7 +259,6 @@ struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label) rcu_read_unlock_bh(); return NULL; } - EXPORT_SYMBOL_GPL(fl6_sock_lookup); void fl6_free_socklist(struct sock *sk) @@ -293,11 +292,11 @@ void fl6_free_socklist(struct sock *sk) following rthdr. 
*/ -struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space, - struct ip6_flowlabel * fl, - struct ipv6_txoptions * fopt) +struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, + struct ip6_flowlabel *fl, + struct ipv6_txoptions *fopt) { - struct ipv6_txoptions * fl_opt = fl->opt; + struct ipv6_txoptions *fl_opt = fl->opt; if (fopt == NULL || fopt->opt_flen == 0) return fl_opt; @@ -388,7 +387,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, goto done; msg.msg_controllen = olen; - msg.msg_control = (void*)(fl->opt+1); + msg.msg_control = (void *)(fl->opt+1); memset(&flowi6, 0, sizeof(flowi6)); err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, @@ -517,7 +516,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) struct net *net = sock_net(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct in6_flowlabel_req freq; - struct ipv6_fl_socklist *sfl1=NULL; + struct ipv6_fl_socklist *sfl1 = NULL; struct ipv6_fl_socklist *sfl; struct ipv6_fl_socklist __rcu **sflp; struct ip6_flowlabel *fl, *fl1 = NULL; @@ -542,7 +541,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) } spin_lock_bh(&ip6_sk_fl_lock); for (sflp = &np->ipv6_fl_list; - (sfl = rcu_dereference(*sflp))!=NULL; + (sfl = rcu_dereference(*sflp)) != NULL; sflp = &sfl->next) { if (sfl->fl->label == freq.flr_label) { if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK)) diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c index 4578e23834f7..14dacc544c3e 100644 --- a/net/ipv6/ip6_icmp.c +++ b/net/ipv6/ip6_icmp.c @@ -13,7 +13,7 @@ static ip6_icmp_send_t __rcu *ip6_icmp_send; int inet6_register_icmp_sender(ip6_icmp_send_t *fn) { return (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, NULL, fn) == NULL) ? - 0 : -EBUSY; + 0 : -EBUSY; } EXPORT_SYMBOL(inet6_register_icmp_sender); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 51d54dc376f3..a3084ab5df6c 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -15,8 +15,8 @@ */ /* Changes * - * Mitsuru KANDA @USAGI and - * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs(). + * Mitsuru KANDA @USAGI and + * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs(). */ #include <linux/errno.h> @@ -65,7 +65,7 @@ int ip6_rcv_finish(struct sk_buff *skb) int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { const struct ipv6hdr *hdr; - u32 pkt_len; + u32 pkt_len; struct inet6_dev *idev; struct net *net = dev_net(skb->dev); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 65eda2a8af48..9952f3fce30a 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -244,7 +244,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, continue; iph2 = (struct ipv6hdr *)(p->data + off); - first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ; + first_word = *(__be32 *)iph ^ *(__be32 *)iph2; /* All fields must match except length and Traffic Class. * XXX skbs on the gro_list have all been parsed and pulled @@ -261,6 +261,9 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, /* flush if Traffic Class fields are different */ NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000)); NAPI_GRO_CB(p)->flush |= flush; + + /* Clear flush_id, there's really no concept of ID in IPv6. 
*/ + NAPI_GRO_CB(p)->flush_id = 0; } NAPI_GRO_CB(skb)->flush |= flush; @@ -314,6 +317,8 @@ static const struct net_offload sit_offload = { .callbacks = { .gso_send_check = ipv6_gso_send_check, .gso_segment = ipv6_gso_segment, + .gro_receive = ipv6_gro_receive, + .gro_complete = ipv6_gro_complete, }, }; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 0a3448b2888f..8e950c250ada 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -20,7 +20,7 @@ * etc. * * H. von Brand : Added missing #include <linux/string.h> - * Imran Patel : frag id should be in NBO + * Imran Patel : frag id should be in NBO * Kazunori MIYAZAWA @USAGI * : add ip6_append_data and related functions * for datagram xmit @@ -233,7 +233,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return -EMSGSIZE; } - EXPORT_SYMBOL(ip6_xmit); static int ip6_call_ra_chain(struct sk_buff *skb, int sel) @@ -555,14 +554,14 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt) int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; - struct rt6_info *rt = (struct rt6_info*)skb_dst(skb); + struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; int hroom, troom; __be32 frag_id = 0; - int ptr, offset = 0, err=0; + int ptr, offset = 0, err = 0; u8 *prevhdr, nexthdr = 0; struct net *net = dev_net(skb_dst(skb)->dev); @@ -637,7 +636,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) } __skb_pull(skb, hlen); - fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); + fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr)); __skb_push(skb, hlen); skb_reset_network_header(skb); memcpy(skb_network_header(skb), tmp_hdr, hlen); @@ -662,7 +661,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) if (frag) { frag->ip_summed = CHECKSUM_NONE; skb_reset_transport_header(frag); - fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); + fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr)); __skb_push(frag, hlen); skb_reset_network_header(frag); memcpy(skb_network_header(frag), tmp_hdr, @@ -681,7 +680,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) } err = output(skb); - if(!err) + if (!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGCREATES); @@ -702,11 +701,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) return 0; } - while (frag) { - skb = frag->next; - kfree_skb(frag); - frag = skb; - } + kfree_skb_list(frag); IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS); @@ -742,7 +737,7 @@ slow_path: /* * Keep copying data until we run out. */ - while(left > 0) { + while (left > 0) { len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) @@ -865,7 +860,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk, /* Yes, checking route validity in not connected * case is not very simple. Take into account, * that we do not support routing by source, TOS, - * and MSG_DONTROUTE --ANK (980726) + * and MSG_DONTROUTE --ANK (980726) * * 1. ip6_rt_check(): If route was host route, * check that cached destination is current. 
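The ip6_fragment() error path above drops its hand-rolled "walk and free" loop in favour of kfree_skb_list(). Assuming the net/core/skbuff.c helper, the replacement is behaviourally identical:

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);  /* free one segment, keep walking the list */
		segs = next;
	}
}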
@@ -1049,7 +1044,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, - int transhdrlen, int mtu,unsigned int flags, + int transhdrlen, int mtu, unsigned int flags, struct rt6_info *rt) { @@ -1072,7 +1067,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, skb_reserve(skb, hh_len); /* create space for UDP/IP header */ - skb_put(skb,fragheaderlen + transhdrlen); + skb_put(skb, fragheaderlen + transhdrlen); /* initialize network header pointer */ skb_reset_network_header(skb); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index f9de5a695072..e01bd0399297 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -408,12 +408,12 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw) { const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw; __u8 nexthdr = ipv6h->nexthdr; - __u16 off = sizeof (*ipv6h); + __u16 off = sizeof(*ipv6h); while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) { __u16 optlen = 0; struct ipv6_opt_hdr *hdr; - if (raw + off + sizeof (*hdr) > skb->data && + if (raw + off + sizeof(*hdr) > skb->data && !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr))) break; @@ -530,7 +530,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; - if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { + if ((len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { rel_type = ICMPV6_PKT_TOOBIG; rel_code = 0; rel_info = mtu; @@ -991,7 +991,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, t->parms.name); goto tx_err_dst_release; } - mtu = dst_mtu(dst) - sizeof (*ipv6h); + mtu = dst_mtu(dst) - sizeof(*ipv6h); if (encap_limit >= 0) { max_headroom += 8; mtu -= 8; @@ -1083,7 +1083,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; - memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6)); + memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_IPIP; dsfield = ipv4_get_dsfield(iph); @@ -1135,7 +1135,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; - memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6)); + memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_IPV6; dsfield = ipv6_get_dsfield(ipv6h); @@ -1229,11 +1229,11 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) if (rt->dst.dev) { dev->hard_header_len = rt->dst.dev->hard_header_len + - sizeof (struct ipv6hdr); + sizeof(struct ipv6hdr); - dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr); + dev->mtu = rt->dst.dev->mtu - sizeof(struct ipv6hdr); if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) - dev->mtu-=8; + dev->mtu -= 8; if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; @@ -1350,7 +1350,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGETTUNNEL: if (dev == ip6n->fb_tnl_dev) { - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) { + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { err = -EFAULT; break; } @@ -1362,7 +1362,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) memset(&p, 0, sizeof(p)); } ip6_tnl_parm_to_user(&p, &t->parms); - if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) { + if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) { 
err = -EFAULT; } break; @@ -1372,7 +1372,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) break; err = -EINVAL; if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP && @@ -1407,7 +1407,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (dev == ip6n->fb_tnl_dev) { err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) + if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) break; err = -ENOENT; ip6_tnl_parm_from_user(&p1, &p); @@ -1482,11 +1482,11 @@ static void ip6_tnl_dev_setup(struct net_device *dev) dev->destructor = ip6_dev_free; dev->type = ARPHRD_TUNNEL6; - dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr); - dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); + dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); + dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr); t = netdev_priv(dev); if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) - dev->mtu-=8; + dev->mtu -= 8; dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c new file mode 100644 index 000000000000..b04ed72c4542 --- /dev/null +++ b/net/ipv6/ip6_udp_tunnel.c @@ -0,0 +1,107 @@ +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/socket.h> +#include <linux/udp.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/in6.h> +#include <net/udp.h> +#include <net/udp_tunnel.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> +#include <net/ip6_tunnel.h> +#include <net/ip6_checksum.h> + +int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, + struct socket **sockp) +{ + struct sockaddr_in6 udp6_addr; + int err; + struct socket *sock = NULL; + + err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock); + if (err < 0) + goto error; + + sk_change_net(sock->sk, net); + + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = cfg->local_udp_port; + err = kernel_bind(sock, (struct sockaddr *)&udp6_addr, + sizeof(udp6_addr)); + if (err < 0) + goto error; + + if (cfg->peer_udp_port) { + udp6_addr.sin6_family = AF_INET6; + memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6, + sizeof(udp6_addr.sin6_addr)); + udp6_addr.sin6_port = cfg->peer_udp_port; + err = kernel_connect(sock, + (struct sockaddr *)&udp6_addr, + sizeof(udp6_addr), 0); + } + if (err < 0) + goto error; + + udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums); + udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums); + + *sockp = sock; + return 0; + +error: + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sk_release_kernel(sock->sk); + } + *sockp = NULL; + return err; +} +EXPORT_SYMBOL_GPL(udp_sock_create6); + +int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst, + struct sk_buff *skb, struct net_device *dev, + struct in6_addr *saddr, struct in6_addr *daddr, + __u8 prio, __u8 ttl, __be16 src_port, __be16 dst_port) +{ + struct udphdr *uh; + struct ipv6hdr *ip6h; + struct sock *sk = sock->sk; + + __skb_push(skb, sizeof(*uh)); + skb_reset_transport_header(skb); + uh = udp_hdr(skb); + + uh->dest = dst_port; + uh->source = src_port; + + uh->len = htons(skb->len); + uh->check = 0; + + memset(&(IPCB(skb)->opt), 0, 
sizeof(IPCB(skb)->opt)); + IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED + | IPSKB_REROUTED); + skb_dst_set(skb, dst); + + udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, + &sk->sk_v6_daddr, skb->len); + + __skb_push(skb, sizeof(*ip6h)); + skb_reset_network_header(skb); + ip6h = ipv6_hdr(skb); + ip6_flow_hdr(ip6h, prio, htonl(0)); + ip6h->payload_len = htons(skb->len); + ip6h->nexthdr = IPPROTO_UDP; + ip6h->hop_limit = ttl; + ip6h->daddr = *daddr; + ip6h->saddr = *saddr; + + ip6tunnel_xmit(skb, dev); + return 0; +} +EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb); + +MODULE_LICENSE("GPL"); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index f9a3fd320d1d..0171f08325c3 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -845,7 +845,7 @@ static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c) atomic_dec(&mrt->cache_resolve_queue_len); - while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { + while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) { if (ipv6_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); nlh->nlmsg_type = NLMSG_ERROR; @@ -1103,7 +1103,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt, * Play the pending entries through our router */ - while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { + while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) { if (ipv6_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index d1c793cffcb5..1b9316e1386a 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -181,8 +181,7 @@ static int ipcomp6_rcv_cb(struct sk_buff *skb, int err) return 0; } -static const struct xfrm_type ipcomp6_type = -{ +static const struct xfrm_type ipcomp6_type = { .description = "IPCOMP6", .owner = THIS_MODULE, .proto = IPPROTO_COMP, @@ -193,8 +192,7 @@ static const struct xfrm_type ipcomp6_type = .hdr_offset = xfrm6_find_1stfragopt, }; -static struct xfrm6_protocol ipcomp6_protocol = -{ +static struct xfrm6_protocol ipcomp6_protocol = { .handler = xfrm6_rcv, .cb_handler = ipcomp6_rcv_cb, .err_handler = ipcomp6_err, diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 0c289982796d..e1a9583bb419 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -66,12 +66,12 @@ int ip6_ra_control(struct sock *sk, int sel) if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW) return -ENOPROTOOPT; - new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; + new_ra = (sel >= 0) ? 
kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; write_lock_bh(&ip6_ra_lock); - for (rap = &ip6_ra_chain; (ra=*rap) != NULL; rap = &ra->next) { + for (rap = &ip6_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { if (ra->sk == sk) { - if (sel>=0) { + if (sel >= 0) { write_unlock_bh(&ip6_ra_lock); kfree(new_ra); return -EADDRINUSE; @@ -130,7 +130,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, int retv = -ENOPROTOOPT; if (optval == NULL) - val=0; + val = 0; else { if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) @@ -139,7 +139,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, val = 0; } - valbool = (val!=0); + valbool = (val != 0); if (ip6_mroute_opt(optname)) return ip6_mroute_setsockopt(sk, optname, optval, optlen); @@ -474,7 +474,7 @@ sticky_done: goto done; msg.msg_controllen = optlen; - msg.msg_control = (void*)(opt+1); + msg.msg_control = (void *)(opt+1); retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk, &junk); @@ -687,7 +687,7 @@ done: retv = -ENOBUFS; break; } - gsf = kmalloc(optlen,GFP_KERNEL); + gsf = kmalloc(optlen, GFP_KERNEL); if (!gsf) { retv = -ENOBUFS; break; @@ -873,7 +873,6 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, #endif return err; } - EXPORT_SYMBOL(ipv6_setsockopt); #ifdef CONFIG_COMPAT @@ -909,7 +908,6 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, #endif return err; } - EXPORT_SYMBOL(compat_ipv6_setsockopt); #endif @@ -921,7 +919,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, if (!opt) return 0; - switch(optname) { + switch (optname) { case IPV6_HOPOPTS: hdr = opt->hopopt; break; @@ -1284,9 +1282,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, return -ENOPROTOOPT; } len = min_t(unsigned int, sizeof(int), len); - if(put_user(len, optlen)) + if (put_user(len, optlen)) return -EFAULT; - if(copy_to_user(optval,&val,len)) + if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } @@ -1299,7 +1297,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, if (level == SOL_IP && sk->sk_type != SOCK_RAW) return udp_prot.getsockopt(sk, level, optname, optval, optlen); - if(level != SOL_IPV6) + if (level != SOL_IPV6) return -ENOPROTOOPT; err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0); @@ -1321,7 +1319,6 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, #endif return err; } - EXPORT_SYMBOL(ipv6_getsockopt); #ifdef CONFIG_COMPAT @@ -1364,7 +1361,6 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, #endif return err; } - EXPORT_SYMBOL(compat_ipv6_getsockopt); #endif diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index a23b655a7627..9648de2b6745 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -64,15 +64,6 @@ #include <net/ip6_checksum.h> -/* Set to 3 to get tracing... */ -#define MCAST_DEBUG 2 - -#if MCAST_DEBUG >= 3 -#define MDBG(x) printk x -#else -#define MDBG(x) -#endif - /* Ensure that we have struct in6_addr aligned on 32bit word. 
*/ static void *__mld2_query_bugs[] __attribute__((__unused__)) = { BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4), @@ -82,9 +73,6 @@ static void *__mld2_query_bugs[] __attribute__((__unused__)) = { static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT; -/* Big mc list lock for all the sockets */ -static DEFINE_SPINLOCK(ipv6_sk_mc_lock); - static void igmp6_join_group(struct ifmcaddr6 *ma); static void igmp6_leave_group(struct ifmcaddr6 *ma); static void igmp6_timer_handler(unsigned long data); @@ -121,6 +109,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, #define IPV6_MLD_MAX_MSF 64 int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF; +int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT; /* * socket join on multicast group @@ -173,7 +162,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) mc_lst->addr = *addr; rtnl_lock(); - rcu_read_lock(); if (ifindex == 0) { struct rt6_info *rt; rt = rt6_lookup(net, addr, NULL, 0, 0); @@ -182,10 +170,9 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) ip6_rt_put(rt); } } else - dev = dev_get_by_index_rcu(net, ifindex); + dev = __dev_get_by_index(net, ifindex); if (dev == NULL) { - rcu_read_unlock(); rtnl_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return -ENODEV; @@ -203,18 +190,14 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) err = ipv6_dev_mc_inc(dev, addr); if (err) { - rcu_read_unlock(); rtnl_unlock(); sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); return err; } - spin_lock(&ipv6_sk_mc_lock); mc_lst->next = np->ipv6_mc_list; rcu_assign_pointer(np->ipv6_mc_list, mc_lst); - spin_unlock(&ipv6_sk_mc_lock); - rcu_read_unlock(); rtnl_unlock(); return 0; @@ -234,20 +217,16 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return -EINVAL; rtnl_lock(); - spin_lock(&ipv6_sk_mc_lock); for (lnk = &np->ipv6_mc_list; - (mc_lst = rcu_dereference_protected(*lnk, - lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ; + (mc_lst = rtnl_dereference(*lnk)) != NULL; lnk = &mc_lst->next) { if ((ifindex == 0 || mc_lst->ifindex == ifindex) && ipv6_addr_equal(&mc_lst->addr, addr)) { struct net_device *dev; *lnk = mc_lst->next; - spin_unlock(&ipv6_sk_mc_lock); - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, mc_lst->ifindex); + dev = __dev_get_by_index(net, mc_lst->ifindex); if (dev != NULL) { struct inet6_dev *idev = __in6_dev_get(dev); @@ -256,7 +235,6 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) __ipv6_dev_mc_dec(idev, &mc_lst->addr); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); - rcu_read_unlock(); rtnl_unlock(); atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); @@ -264,7 +242,6 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) return 0; } } - spin_unlock(&ipv6_sk_mc_lock); rtnl_unlock(); return -EADDRNOTAVAIL; @@ -311,16 +288,12 @@ void ipv6_sock_mc_close(struct sock *sk) return; rtnl_lock(); - spin_lock(&ipv6_sk_mc_lock); - while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, - lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { + while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) { struct net_device *dev; np->ipv6_mc_list = mc_lst->next; - spin_unlock(&ipv6_sk_mc_lock); - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, mc_lst->ifindex); + dev = __dev_get_by_index(net, mc_lst->ifindex); if (dev) { struct inet6_dev *idev = __in6_dev_get(dev); @@ -329,14 +302,11 @@ void 
ipv6_sock_mc_close(struct sock *sk) __ipv6_dev_mc_dec(idev, &mc_lst->addr); } else (void) ip6_mc_leave_src(sk, mc_lst, NULL); - rcu_read_unlock(); atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); kfree_rcu(mc_lst, rcu); - spin_lock(&ipv6_sk_mc_lock); } - spin_unlock(&ipv6_sk_mc_lock); rtnl_unlock(); } @@ -400,7 +370,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, if (!psl) goto done; /* err = -EADDRNOTAVAIL */ rv = !0; - for (i=0; i<psl->sl_count; i++) { + for (i = 0; i < psl->sl_count; i++) { rv = !ipv6_addr_equal(&psl->sl_addr[i], source); if (rv == 0) break; @@ -417,7 +387,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, /* update the interface filter */ ip6_mc_del_src(idev, group, omode, 1, source, 1); - for (j=i+1; j<psl->sl_count; j++) + for (j = i+1; j < psl->sl_count; j++) psl->sl_addr[j-1] = psl->sl_addr[j]; psl->sl_count--; err = 0; @@ -443,19 +413,19 @@ int ip6_mc_source(int add, int omode, struct sock *sk, newpsl->sl_max = count; newpsl->sl_count = count - IP6_SFBLOCK; if (psl) { - for (i=0; i<psl->sl_count; i++) + for (i = 0; i < psl->sl_count; i++) newpsl->sl_addr[i] = psl->sl_addr[i]; sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max)); } pmc->sflist = psl = newpsl; } rv = 1; /* > 0 for insert logic below if sl_count is 0 */ - for (i=0; i<psl->sl_count; i++) { + for (i = 0; i < psl->sl_count; i++) { rv = !ipv6_addr_equal(&psl->sl_addr[i], source); if (rv == 0) /* There is an error in the address. */ goto done; } - for (j=psl->sl_count-1; j>=i; j--) + for (j = psl->sl_count-1; j >= i; j--) psl->sl_addr[j+1] = psl->sl_addr[j]; psl->sl_addr[i] = *source; psl->sl_count++; @@ -524,7 +494,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) goto done; } newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc; - for (i=0; i<newpsl->sl_count; ++i) { + for (i = 0; i < newpsl->sl_count; ++i) { struct sockaddr_in6 *psin6; psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i]; @@ -586,9 +556,8 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, } err = -EADDRNOTAVAIL; - /* - * changes to the ipv6_mc_list require the socket lock and - * a read lock on ip6_sk_mc_lock. We have the socket lock, + /* changes to the ipv6_mc_list require the socket lock and + * rtnl lock. We have the socket lock and rcu read lock, * so reading the list is safe. */ @@ -612,11 +581,10 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) { return -EFAULT; } - /* changes to psl require the socket lock, a read lock on - * on ipv6_sk_mc_lock and a write lock on pmc->sflock. We - * have the socket lock, so reading here is safe. + /* changes to psl require the socket lock, and a write lock + * on pmc->sflock. We have the socket lock so reading here is safe. 
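
A note on the removal loops used throughout this file: ip6_ra_control() and ipv6_sock_mc_drop() above both walk their list through a pointer-to-pointer (rap/lnk holds the address of the link that reaches the current node), so unlinking is a single store and the list head needs no special case. A minimal userspace sketch of the idiom (names here are illustrative, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

/* Unlink the first node matching key; returns it or NULL.
 * lnk always holds the address of the pointer that reaches
 * the current node, so head and middle removals are identical. */
static struct node *unlink_key(struct node **head, int key)
{
	struct node **lnk, *n;

	for (lnk = head; (n = *lnk) != NULL; lnk = &n->next) {
		if (n->key == key) {
			*lnk = n->next;	/* single store, no prev bookkeeping */
			return n;
		}
	}
	return NULL;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;

	unlink_key(&head, 1);		/* head removal, no special case */
	for (struct node *n = head; n; n = n->next)
		printf("%d\n", n->key);	/* prints 2 then 3 */
	return 0;
}
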
*/ - for (i=0; i<copycount; i++) { + for (i = 0; i < copycount; i++) { struct sockaddr_in6 *psin6; struct sockaddr_storage ss; @@ -658,7 +626,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, } else { int i; - for (i=0; i<psl->sl_count; i++) { + for (i = 0; i < psl->sl_count; i++) { if (ipv6_addr_equal(&psl->sl_addr[i], src_addr)) break; } @@ -673,14 +641,6 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr, return rv; } -static void ma_put(struct ifmcaddr6 *mc) -{ - if (atomic_dec_and_test(&mc->mca_refcnt)) { - in6_dev_put(mc->idev); - kfree(mc); - } -} - static void igmp6_group_added(struct ifmcaddr6 *mc) { struct net_device *dev = mc->idev->dev; @@ -772,7 +732,7 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) pmc->mca_tomb = im->mca_tomb; pmc->mca_sources = im->mca_sources; im->mca_tomb = im->mca_sources = NULL; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = pmc->mca_crcount; } spin_unlock_bh(&im->mca_lock); @@ -790,7 +750,7 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca) spin_lock_bh(&idev->mc_lock); pmc_prev = NULL; - for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) { + for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) { if (ipv6_addr_equal(&pmc->mca_addr, pmca)) break; pmc_prev = pmc; @@ -804,7 +764,7 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca) spin_unlock_bh(&idev->mc_lock); if (pmc) { - for (psf=pmc->mca_tomb; psf; psf=psf_next) { + for (psf = pmc->mca_tomb; psf; psf = psf_next) { psf_next = psf->sf_next; kfree(psf); } @@ -831,14 +791,14 @@ static void mld_clear_delrec(struct inet6_dev *idev) /* clear dead sources, too */ read_lock_bh(&idev->lock); - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { struct ip6_sf_list *psf, *psf_next; spin_lock_bh(&pmc->mca_lock); psf = pmc->mca_tomb; pmc->mca_tomb = NULL; spin_unlock_bh(&pmc->mca_lock); - for (; psf; psf=psf_next) { + for (; psf; psf = psf_next) { psf_next = psf->sf_next; kfree(psf); } @@ -846,6 +806,48 @@ static void mld_clear_delrec(struct inet6_dev *idev) read_unlock_bh(&idev->lock); } +static void mca_get(struct ifmcaddr6 *mc) +{ + atomic_inc(&mc->mca_refcnt); +} + +static void ma_put(struct ifmcaddr6 *mc) +{ + if (atomic_dec_and_test(&mc->mca_refcnt)) { + in6_dev_put(mc->idev); + kfree(mc); + } +} + +static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev, + const struct in6_addr *addr) +{ + struct ifmcaddr6 *mc; + + mc = kzalloc(sizeof(*mc), GFP_ATOMIC); + if (mc == NULL) + return NULL; + + setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); + + mc->mca_addr = *addr; + mc->idev = idev; /* reference taken by caller */ + mc->mca_users = 1; + /* mca_stamp should be updated upon changes */ + mc->mca_cstamp = mc->mca_tstamp = jiffies; + atomic_set(&mc->mca_refcnt, 1); + spin_lock_init(&mc->mca_lock); + + /* initial mode is (EX, empty) */ + mc->mca_sfmode = MCAST_EXCLUDE; + mc->mca_sfcount[MCAST_EXCLUDE] = 1; + + if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || + IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) + mc->mca_flags |= MAF_NOREPORT; + + return mc; +} /* * device multicast group inc (add if not found) @@ -881,38 +883,20 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) } } - /* - * not found: create a new one. 
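
The refactor below is worth spelling out: mca_alloc() returns an entry holding exactly one reference for the caller, replacing the old atomic_set(&mc->mca_refcnt, 2) that baked the list's reference into initialization. The second reference is now taken explicitly (mca_get()) only once the entry is linked onto idev->mc_list, since publication makes it reachable by other threads. A compact sketch of that publish-then-get discipline, using C11 atomics and a pthread mutex in place of the kernel primitives (illustrative only):

#include <stdatomic.h>
#include <stdlib.h>
#include <pthread.h>

struct entry {
	atomic_int refcnt;
	struct entry *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *list_head;

static void entry_get(struct entry *e) { atomic_fetch_add(&e->refcnt, 1); }

static void entry_put(struct entry *e)
{
	if (atomic_fetch_sub(&e->refcnt, 1) == 1)
		free(e);
}

/* Allocate with one reference owned by the caller. */
static struct entry *entry_alloc(void)
{
	struct entry *e = calloc(1, sizeof(*e));
	if (e)
		atomic_init(&e->refcnt, 1);
	return e;
}

static struct entry *publish_new_entry(void)
{
	struct entry *e = entry_alloc();
	if (!e)
		return NULL;
	pthread_mutex_lock(&list_lock);
	e->next = list_head;
	list_head = e;
	/* The list now holds one reference; take another for the
	 * work we still have to do after dropping the lock. */
	entry_get(e);
	pthread_mutex_unlock(&list_lock);
	return e;	/* caller must entry_put() when done */
}

int main(void)
{
	struct entry *e = publish_new_entry();
	if (e)
		entry_put(e);	/* drop the caller's ref; the list still holds one */
	return 0;
}
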
- */ - - mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC); - - if (mc == NULL) { + mc = mca_alloc(idev, addr); + if (!mc) { write_unlock_bh(&idev->lock); in6_dev_put(idev); return -ENOMEM; } - setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); - - mc->mca_addr = *addr; - mc->idev = idev; /* (reference taken) */ - mc->mca_users = 1; - /* mca_stamp should be updated upon changes */ - mc->mca_cstamp = mc->mca_tstamp = jiffies; - atomic_set(&mc->mca_refcnt, 2); - spin_lock_init(&mc->mca_lock); - - /* initial mode is (EX, empty) */ - mc->mca_sfmode = MCAST_EXCLUDE; - mc->mca_sfcount[MCAST_EXCLUDE] = 1; - - if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) || - IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) - mc->mca_flags |= MAF_NOREPORT; - mc->next = idev->mc_list; idev->mc_list = mc; + + /* Hold this for the code below before we unlock, + * it is already exposed via idev->mc_list. + */ + mca_get(mc); write_unlock_bh(&idev->lock); mld_del_delrec(idev, &mc->mca_addr); @@ -931,7 +915,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) ASSERT_RTNL(); write_lock_bh(&idev->lock); - for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) { + for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) { if (ipv6_addr_equal(&ma->mca_addr, addr)) { if (--ma->mca_users == 0) { *map = ma->next; @@ -956,7 +940,7 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) struct inet6_dev *idev; int err; - rcu_read_lock(); + ASSERT_RTNL(); idev = __in6_dev_get(dev); if (!idev) @@ -964,7 +948,6 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) else err = __ipv6_dev_mc_dec(idev, addr); - rcu_read_unlock(); return err; } @@ -982,7 +965,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, idev = __in6_dev_get(dev); if (idev) { read_lock_bh(&idev->lock); - for (mc = idev->mc_list; mc; mc=mc->next) { + for (mc = idev->mc_list; mc; mc = mc->next) { if (ipv6_addr_equal(&mc->mca_addr, group)) break; } @@ -991,7 +974,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, struct ip6_sf_list *psf; spin_lock_bh(&mc->mca_lock); - for (psf=mc->mca_sources;psf;psf=psf->sf_next) { + for (psf = mc->mca_sources; psf; psf = psf->sf_next) { if (ipv6_addr_equal(&psf->sf_addr, src_addr)) break; } @@ -1000,7 +983,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, psf->sf_count[MCAST_EXCLUDE] != mc->mca_sfcount[MCAST_EXCLUDE]; else - rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0; + rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0; spin_unlock_bh(&mc->mca_lock); } else rv = true; /* don't filter unspecified source */ @@ -1091,10 +1074,10 @@ static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, int i, scount; scount = 0; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (scount == nsrcs) break; - for (i=0; i<nsrcs; i++) { + for (i = 0; i < nsrcs; i++) { /* skip inactive filters */ if (psf->sf_count[MCAST_INCLUDE] || pmc->mca_sfcount[MCAST_EXCLUDE] != @@ -1124,10 +1107,10 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, /* mark INCLUDE-mode sources */ scount = 0; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (scount == nsrcs) break; - for (i=0; i<nsrcs; i++) { + for (i = 0; i < nsrcs; i++) { if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) { psf->sf_gsresp = 1; scount++; @@ -1205,15 +1188,16 
@@ static void mld_update_qrv(struct inet6_dev *idev, * and SHOULD NOT be one. Catch this here if we ever run * into such a case in future. */ + const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv); WARN_ON(idev->mc_qrv == 0); if (mlh2->mld2q_qrv > 0) idev->mc_qrv = mlh2->mld2q_qrv; - if (unlikely(idev->mc_qrv < 2)) { + if (unlikely(idev->mc_qrv < min_qrv)) { net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n", - idev->mc_qrv, MLD_QRV_DEFAULT); - idev->mc_qrv = MLD_QRV_DEFAULT; + idev->mc_qrv, min_qrv); + idev->mc_qrv = min_qrv; } } @@ -1253,7 +1237,7 @@ static void mld_update_qri(struct inet6_dev *idev, } static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, - unsigned long *max_delay) + unsigned long *max_delay, bool v1_query) { unsigned long mldv1_md; @@ -1261,11 +1245,32 @@ static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, if (mld_in_v2_mode_only(idev)) return -EINVAL; - /* MLDv1 router present */ mldv1_md = ntohs(mld->mld_maxdelay); + + /* When in MLDv1 fallback and an MLDv2 router starts up + * unaware of current MLDv1 operation, the MRC == MRD mapping + * only works when the exponential algorithm is not being + * used (as MLDv1 is unaware of such things). + * + * According to the RFC author, the MLDv2 implementations + * he's aware of all use an MRC < 32768 on start-up queries. + * + * Thus, should we *ever* encounter anything larger + * than that, just assume the maximum possible within our + * reach. + */ + if (!v1_query) + mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT); + *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL); - mld_set_v1_mode(idev); + /* MLDv1 router present: we need to go into v1 mode *only* + * when an MLDv1 query is received as per section 9.12 of + * RFC3810! And we know from RFC2710 section 3.7 that MLDv1 + * queries MUST be of exactly 24 octets. 
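
For reference, the "exponential algorithm" this comment refers to is the Maximum Response Code encoding of RFC 3810, section 5.1.3: an MRC below 32768 is a literal millisecond count (which is why the MRC == MRD mapping holds in that range), while larger values pack a mantissa and a 3-bit exponent. A standalone decoder matching that definition, offered as a reading aid rather than kernel code:

#include <stdint.h>
#include <stdio.h>

/* Decode an MLDv2 Maximum Response Code into milliseconds,
 * per RFC 3810, section 5.1.3. */
static uint32_t mldv2_mrc_to_msecs(uint16_t mrc)
{
	if (mrc < 0x8000)
		return mrc;	/* literal value: MRC == MRD */
	/* layout: 1 | exp (3 bits) | mant (12 bits) */
	return ((uint32_t)(mrc & 0x0fff) | 0x1000) << (((mrc >> 12) & 0x7) + 3);
}

int main(void)
{
	printf("%u\n", mldv2_mrc_to_msecs(1000));	/* 1000 ms */
	printf("%u\n", mldv2_mrc_to_msecs(0x8000));	/* 32768 ms */
	return 0;
}
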
+ */ + if (v1_query) + mld_set_v1_mode(idev); /* cancel MLDv2 report timer */ mld_gq_stop_timer(idev); @@ -1280,10 +1285,6 @@ static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld, static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld, unsigned long *max_delay) { - /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */ - if (mld_in_v1_mode(idev)) - return -EINVAL; - *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL); mld_update_qrv(idev, mld); @@ -1340,8 +1341,11 @@ int igmp6_event_query(struct sk_buff *skb) !(group_type&IPV6_ADDR_MULTICAST)) return -EINVAL; - if (len == MLD_V1_QUERY_LEN) { - err = mld_process_v1(idev, mld, &max_delay); + if (len < MLD_V1_QUERY_LEN) { + return -EINVAL; + } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) { + err = mld_process_v1(idev, mld, &max_delay, + len == MLD_V1_QUERY_LEN); if (err < 0) return err; } else if (len >= MLD_V2_QUERY_LEN_MIN) { @@ -1373,18 +1377,19 @@ int igmp6_event_query(struct sk_buff *skb) mlh2 = (struct mld2_query *)skb_transport_header(skb); mark = 1; } - } else + } else { return -EINVAL; + } read_lock_bh(&idev->lock); if (group_type == IPV6_ADDR_ANY) { - for (ma = idev->mc_list; ma; ma=ma->next) { + for (ma = idev->mc_list; ma; ma = ma->next) { spin_lock_bh(&ma->mca_lock); igmp6_group_queried(ma, max_delay); spin_unlock_bh(&ma->mca_lock); } } else { - for (ma = idev->mc_list; ma; ma=ma->next) { + for (ma = idev->mc_list; ma; ma = ma->next) { if (!ipv6_addr_equal(group, &ma->mca_addr)) continue; spin_lock_bh(&ma->mca_lock); @@ -1448,7 +1453,7 @@ int igmp6_event_report(struct sk_buff *skb) */ read_lock_bh(&idev->lock); - for (ma = idev->mc_list; ma; ma=ma->next) { + for (ma = idev->mc_list; ma; ma = ma->next) { if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) { spin_lock(&ma->mca_lock); if (del_timer(&ma->mca_timer)) @@ -1512,7 +1517,7 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) struct ip6_sf_list *psf; int scount = 0; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (!is_in(pmc, psf, type, gdeleted, sdeleted)) continue; scount++; @@ -1726,7 +1731,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, } first = 1; psf_prev = NULL; - for (psf=*psf_list; psf; psf=psf_next) { + for (psf = *psf_list; psf; psf = psf_next) { struct in6_addr *psrc; psf_next = psf->sf_next; @@ -1805,7 +1810,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) read_lock_bh(&idev->lock); if (!pmc) { - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { if (pmc->mca_flags & MAF_NOREPORT) continue; spin_lock_bh(&pmc->mca_lock); @@ -1838,7 +1843,7 @@ static void mld_clear_zeros(struct ip6_sf_list **ppsf) struct ip6_sf_list *psf_prev, *psf_next, *psf; psf_prev = NULL; - for (psf=*ppsf; psf; psf = psf_next) { + for (psf = *ppsf; psf; psf = psf_next) { psf_next = psf->sf_next; if (psf->sf_crcount == 0) { if (psf_prev) @@ -1862,7 +1867,7 @@ static void mld_send_cr(struct inet6_dev *idev) /* deleted MCA's */ pmc_prev = NULL; - for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) { + for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) { pmc_next = pmc->next; if (pmc->mca_sfmode == MCAST_INCLUDE) { type = MLD2_BLOCK_OLD_SOURCES; @@ -1895,7 +1900,7 @@ static void mld_send_cr(struct inet6_dev *idev) spin_unlock(&idev->mc_lock); /* change recs */ - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = 
pmc->next) { spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) { type = MLD2_BLOCK_OLD_SOURCES; @@ -2032,7 +2037,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev) skb = NULL; read_lock_bh(&idev->lock); - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) type = MLD2_CHANGE_TO_EXCLUDE; @@ -2077,7 +2082,7 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, int rv = 0; psf_prev = NULL; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (ipv6_addr_equal(&psf->sf_addr, psfsrc)) break; psf_prev = psf; @@ -2118,7 +2123,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, if (!idev) return -ENODEV; read_lock_bh(&idev->lock); - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; pmc = pmc->next) { if (ipv6_addr_equal(pmca, &pmc->mca_addr)) break; } @@ -2138,7 +2143,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, pmc->mca_sfcount[sfmode]--; } err = 0; - for (i=0; i<sfcount; i++) { + for (i = 0; i < sfcount; i++) { int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]); changerec |= rv > 0; @@ -2154,7 +2159,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, pmc->mca_sfmode = MCAST_INCLUDE; pmc->mca_crcount = idev->mc_qrv; idev->mc_ifc_count = pmc->mca_crcount; - for (psf=pmc->mca_sources; psf; psf = psf->sf_next) + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; mld_ifc_event(pmc->idev); } else if (sf_setstate(pmc) || changerec) @@ -2173,7 +2178,7 @@ static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode, struct ip6_sf_list *psf, *psf_prev; psf_prev = NULL; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (ipv6_addr_equal(&psf->sf_addr, psfsrc)) break; psf_prev = psf; @@ -2198,7 +2203,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc) struct ip6_sf_list *psf; int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE]; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) if (pmc->mca_sfcount[MCAST_EXCLUDE]) { psf->sf_oldin = mca_xcount == psf->sf_count[MCAST_EXCLUDE] && @@ -2215,7 +2220,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc) int new_in, rv; rv = 0; - for (psf=pmc->mca_sources; psf; psf=psf->sf_next) { + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) { if (pmc->mca_sfcount[MCAST_EXCLUDE]) { new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] && !psf->sf_count[MCAST_INCLUDE]; @@ -2225,8 +2230,8 @@ static int sf_setstate(struct ifmcaddr6 *pmc) if (!psf->sf_oldin) { struct ip6_sf_list *prev = NULL; - for (dpsf=pmc->mca_tomb; dpsf; - dpsf=dpsf->sf_next) { + for (dpsf = pmc->mca_tomb; dpsf; + dpsf = dpsf->sf_next) { if (ipv6_addr_equal(&dpsf->sf_addr, &psf->sf_addr)) break; @@ -2248,7 +2253,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc) * add or update "delete" records if an active filter * is now inactive */ - for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next) + for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next) if (ipv6_addr_equal(&dpsf->sf_addr, &psf->sf_addr)) break; @@ -2282,7 +2287,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, if (!idev) return -ENODEV; read_lock_bh(&idev->lock); - for (pmc=idev->mc_list; pmc; pmc=pmc->next) { + for (pmc = idev->mc_list; pmc; 
pmc = pmc->next) { if (ipv6_addr_equal(pmca, &pmc->mca_addr)) break; } @@ -2298,7 +2303,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, if (!delta) pmc->mca_sfcount[sfmode]++; err = 0; - for (i=0; i<sfcount; i++) { + for (i = 0; i < sfcount; i++) { err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]); if (err) break; @@ -2308,7 +2313,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, if (!delta) pmc->mca_sfcount[sfmode]--; - for (j=0; j<i; j++) + for (j = 0; j < i; j++) ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]); } else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) { struct ip6_sf_list *psf; @@ -2322,7 +2327,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, pmc->mca_crcount = idev->mc_qrv; idev->mc_ifc_count = pmc->mca_crcount; - for (psf=pmc->mca_sources; psf; psf = psf->sf_next) + for (psf = pmc->mca_sources; psf; psf = psf->sf_next) psf->sf_crcount = 0; mld_ifc_event(idev); } else if (sf_setstate(pmc)) @@ -2336,12 +2341,12 @@ static void ip6_mc_clear_src(struct ifmcaddr6 *pmc) { struct ip6_sf_list *psf, *nextpsf; - for (psf=pmc->mca_tomb; psf; psf=nextpsf) { + for (psf = pmc->mca_tomb; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } pmc->mca_tomb = NULL; - for (psf=pmc->mca_sources; psf; psf=nextpsf) { + for (psf = pmc->mca_sources; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } @@ -2380,7 +2385,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, { int err; - /* callers have the socket lock and a write lock on ipv6_sk_mc_lock, + /* callers have the socket lock and rtnl lock * so no other readers or writers of iml or its sflist */ if (!iml->sflist) { @@ -2485,13 +2490,21 @@ void ipv6_mc_down(struct inet6_dev *idev) mld_gq_stop_timer(idev); mld_dad_stop_timer(idev); - for (i = idev->mc_list; i; i=i->next) + for (i = idev->mc_list; i; i = i->next) igmp6_group_dropped(i); read_unlock_bh(&idev->lock); mld_clear_delrec(idev); } +static void ipv6_mc_reset(struct inet6_dev *idev) +{ + idev->mc_qrv = sysctl_mld_qrv; + idev->mc_qi = MLD_QI_DEFAULT; + idev->mc_qri = MLD_QRI_DEFAULT; + idev->mc_v1_seen = 0; + idev->mc_maxdelay = unsolicited_report_interval(idev); +} /* Device going up */ @@ -2502,7 +2515,8 @@ void ipv6_mc_up(struct inet6_dev *idev) /* Install multicast list, except for all-nodes (already installed) */ read_lock_bh(&idev->lock); - for (i = idev->mc_list; i; i=i->next) + ipv6_mc_reset(idev); + for (i = idev->mc_list; i; i = i->next) igmp6_group_added(i); read_unlock_bh(&idev->lock); } @@ -2522,13 +2536,7 @@ void ipv6_mc_init_dev(struct inet6_dev *idev) (unsigned long)idev); setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire, (unsigned long)idev); - - idev->mc_qrv = MLD_QRV_DEFAULT; - idev->mc_qi = MLD_QI_DEFAULT; - idev->mc_qri = MLD_QRI_DEFAULT; - - idev->mc_maxdelay = unsolicited_report_interval(idev); - idev->mc_v1_seen = 0; + ipv6_mc_reset(idev); write_unlock_bh(&idev->lock); } diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index db9b6cbc9db3..f61429d391d3 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -336,11 +336,10 @@ static void mip6_destopt_destroy(struct xfrm_state *x) { } -static const struct xfrm_type mip6_destopt_type = -{ +static const struct xfrm_type mip6_destopt_type = { .description = "MIP6DESTOPT", .owner = THIS_MODULE, - .proto = IPPROTO_DSTOPTS, + .proto = IPPROTO_DSTOPTS, .flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_LOCAL_COADDR, .init_state = mip6_destopt_init_state, .destructor = 
mip6_destopt_destroy, @@ -469,11 +468,10 @@ static void mip6_rthdr_destroy(struct xfrm_state *x) { } -static const struct xfrm_type mip6_rthdr_type = -{ +static const struct xfrm_type mip6_rthdr_type = { .description = "MIP6RT", .owner = THIS_MODULE, - .proto = IPPROTO_ROUTING, + .proto = IPPROTO_ROUTING, .flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_REMOTE_COADDR, .init_state = mip6_rthdr_init_state, .destructor = mip6_rthdr_destroy, diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 339078f95d1b..4cb45c1079a2 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -175,7 +175,7 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, type = cur->nd_opt_type; do { cur = ((void *)cur) + (cur->nd_opt_len << 3); - } while(cur < end && cur->nd_opt_type != type); + } while (cur < end && cur->nd_opt_type != type); return cur <= end && cur->nd_opt_type == type ? cur : NULL; } @@ -192,7 +192,7 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, return NULL; do { cur = ((void *)cur) + (cur->nd_opt_len << 3); - } while(cur < end && !ndisc_is_useropt(cur)); + } while (cur < end && !ndisc_is_useropt(cur)); return cur <= end && ndisc_is_useropt(cur) ? cur : NULL; } @@ -284,7 +284,6 @@ int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, } return -EINVAL; } - EXPORT_SYMBOL(ndisc_mc_map); static u32 ndisc_hash(const void *pkey, @@ -296,7 +295,7 @@ static u32 ndisc_hash(const void *pkey, static int ndisc_constructor(struct neighbour *neigh) { - struct in6_addr *addr = (struct in6_addr*)&neigh->primary_key; + struct in6_addr *addr = (struct in6_addr *)&neigh->primary_key; struct net_device *dev = neigh->dev; struct inet6_dev *in6_dev; struct neigh_parms *parms; @@ -344,7 +343,7 @@ static int ndisc_constructor(struct neighbour *neigh) static int pndisc_constructor(struct pneigh_entry *n) { - struct in6_addr *addr = (struct in6_addr*)&n->key; + struct in6_addr *addr = (struct in6_addr *)&n->key; struct in6_addr maddr; struct net_device *dev = n->dev; @@ -357,7 +356,7 @@ static int pndisc_constructor(struct pneigh_entry *n) static void pndisc_destructor(struct pneigh_entry *n) { - struct in6_addr *addr = (struct in6_addr*)&n->key; + struct in6_addr *addr = (struct in6_addr *)&n->key; struct in6_addr maddr; struct net_device *dev = n->dev; @@ -1065,7 +1064,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) int optlen; unsigned int pref = 0; - __u8 * opt = (__u8 *)(ra_msg + 1); + __u8 *opt = (__u8 *)(ra_msg + 1); optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) - sizeof(struct ra_msg); @@ -1319,7 +1318,7 @@ skip_linkparms: continue; if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) continue; - rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3, + rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3, &ipv6_hdr(skb)->saddr); } } @@ -1352,7 +1351,7 @@ skip_routeinfo: __be32 n; u32 mtu; - memcpy(&n, ((u8*)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu)); + memcpy(&n, ((u8 *)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu)); mtu = ntohl(n); if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 2812816aabdc..a8f25306a46a 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -258,8 +258,21 @@ config IP6_NF_NAT if IP6_NF_NAT +config NF_NAT_MASQUERADE_IPV6 + tristate "IPv6 masquerade support" + help + This is the kernel functionality to provide NAT in the masquerade + flavour (automatic source address selection) for IPv6. 
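
The point of splitting this helper out is that masquerading must pick, at packet time, a source address owned by the outgoing interface; in the kernel that job is done by ipv6_dev_get_saddr(), used by the masquerade code later in this patch. A rough userspace analogue with getifaddrs() shows the "automatic source address selection" idea, minus the scope and preference rules the kernel applies (the interface name and all other details here are illustrative):

#include <stdio.h>
#include <string.h>
#include <ifaddrs.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Find some IPv6 address configured on the named interface.
 * Returns 0 on success and fills *src. */
static int pick_v6_saddr(const char *ifname, struct in6_addr *src)
{
	struct ifaddrs *ifa, *p;
	int ret = -1;

	if (getifaddrs(&ifa) < 0)
		return -1;
	for (p = ifa; p; p = p->ifa_next) {
		if (!p->ifa_addr || p->ifa_addr->sa_family != AF_INET6)
			continue;
		if (strcmp(p->ifa_name, ifname) != 0)
			continue;
		*src = ((struct sockaddr_in6 *)p->ifa_addr)->sin6_addr;
		ret = 0;
		break;
	}
	freeifaddrs(ifa);
	return ret;
}

int main(void)
{
	struct in6_addr a;
	char buf[INET6_ADDRSTRLEN];

	if (pick_v6_saddr("lo", &a) == 0)
		printf("%s\n", inet_ntop(AF_INET6, &a, buf, sizeof(buf)));
	return 0;
}
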
+ +config NFT_MASQ_IPV6 + tristate "IPv6 masquerade support for nf_tables" + depends on NF_TABLES_IPV6 + depends on NFT_MASQ + select NF_NAT_MASQUERADE_IPV6 + config IP6_NF_TARGET_MASQUERADE tristate "MASQUERADE target support" + select NF_NAT_MASQUERADE_IPV6 help Masquerading is a special case of NAT: all outgoing connections are changed to seem to come from a particular interface's address, and diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index c3d3286db4bb..0f7e5b3f328d 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o +obj-$(CONFIG_NF_NAT_MASQUERADE_IPV6) += nf_nat_masquerade_ipv6.o # defrag nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o @@ -31,6 +32,7 @@ obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o +obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o # matches obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c index 3e4e92d5e157..7f9f45d829d2 100644 --- a/net/ipv6/netfilter/ip6t_MASQUERADE.c +++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c @@ -19,33 +19,12 @@ #include <net/netfilter/nf_nat.h> #include <net/addrconf.h> #include <net/ipv6.h> +#include <net/netfilter/ipv6/nf_nat_masquerade.h> static unsigned int masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par) { - const struct nf_nat_range *range = par->targinfo; - enum ip_conntrack_info ctinfo; - struct in6_addr src; - struct nf_conn *ct; - struct nf_nat_range newrange; - - ct = nf_ct_get(skb, &ctinfo); - NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || - ctinfo == IP_CT_RELATED_REPLY)); - - if (ipv6_dev_get_saddr(dev_net(par->out), par->out, - &ipv6_hdr(skb)->daddr, 0, &src) < 0) - return NF_DROP; - - nfct_nat(ct)->masq_index = par->out->ifindex; - - newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; - newrange.min_addr.in6 = src; - newrange.max_addr.in6 = src; - newrange.min_proto = range->min_proto; - newrange.max_proto = range->max_proto; - - return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); + return nf_nat_masquerade_ipv6(skb, par->targinfo, par->out); } static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par) @@ -57,48 +36,6 @@ static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par) return 0; } -static int device_cmp(struct nf_conn *ct, void *ifindex) -{ - const struct nf_conn_nat *nat = nfct_nat(ct); - - if (!nat) - return 0; - if (nf_ct_l3num(ct) != NFPROTO_IPV6) - return 0; - return nat->masq_index == (int)(long)ifindex; -} - -static int masq_device_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - const struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct net *net = dev_net(dev); - - if (event == NETDEV_DOWN) - nf_ct_iterate_cleanup(net, device_cmp, - (void *)(long)dev->ifindex, 0, 0); - - return NOTIFY_DONE; -} - -static struct notifier_block masq_dev_notifier = { - .notifier_call = masq_device_event, -}; - -static int masq_inet_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct inet6_ifaddr *ifa = ptr; - struct netdev_notifier_info info; - - netdev_notifier_info_init(&info, ifa->idev->dev); - 
return masq_device_event(this, event, &info); -} - -static struct notifier_block masq_inet_notifier = { - .notifier_call = masq_inet_event, -}; - static struct xt_target masquerade_tg6_reg __read_mostly = { .name = "MASQUERADE", .family = NFPROTO_IPV6, @@ -115,17 +52,14 @@ static int __init masquerade_tg6_init(void) int err; err = xt_register_target(&masquerade_tg6_reg); - if (err == 0) { - register_netdevice_notifier(&masq_dev_notifier); - register_inet6addr_notifier(&masq_inet_notifier); - } + if (err == 0) + nf_nat_masquerade_ipv6_register_notifier(); return err; } static void __exit masquerade_tg6_exit(void) { - unregister_inet6addr_notifier(&masq_inet_notifier); - unregister_netdevice_notifier(&masq_dev_notifier); + nf_nat_masquerade_ipv6_unregister_notifier(); xt_unregister_target(&masquerade_tg6_reg); } diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index 387d8b8fc18d..b0634ac996b7 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c @@ -30,222 +30,57 @@ static const struct xt_table nf_nat_ipv6_table = { .af = NFPROTO_IPV6, }; -static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum) -{ - /* Force range to this IP; let proto decide mapping for - * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED). - */ - struct nf_nat_range range; - - range.flags = 0; - pr_debug("Allocating NULL binding for %p (%pI6)\n", ct, - HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ? - &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6 : - &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6); - - return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum)); -} - -static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum, - const struct net_device *in, - const struct net_device *out, - struct nf_conn *ct) +static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct) { struct net *net = nf_ct_net(ct); - unsigned int ret; - ret = ip6t_do_table(skb, hooknum, in, out, net->ipv6.ip6table_nat); - if (ret == NF_ACCEPT) { - if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum))) - ret = alloc_null_binding(ct, hooknum); - } - return ret; + return ip6t_do_table(skb, ops->hooknum, in, out, net->ipv6.ip6table_nat); } -static unsigned int -nf_nat_ipv6_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - struct nf_conn_nat *nat; - enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); - __be16 frag_off; - int hdrlen; - u8 nexthdr; - - ct = nf_ct_get(skb, &ctinfo); - /* Can't track? It's not due to stress, or conntrack would - * have dropped it. Hence it's the user's responsibilty to - * packet filter it out, or implement conntrack/NAT for that - * protocol. 
8) --RR - */ - if (!ct) - return NF_ACCEPT; - - /* Don't try to NAT if this packet is not conntracked */ - if (nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; - - switch (ctinfo) { - case IP_CT_RELATED: - case IP_CT_RELATED_REPLY: - nexthdr = ipv6_hdr(skb)->nexthdr; - hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), - &nexthdr, &frag_off); - - if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) { - if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo, - ops->hooknum, - hdrlen)) - return NF_DROP; - else - return NF_ACCEPT; - } - /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ - case IP_CT_NEW: - /* Seen it before? This can happen for loopback, retrans, - * or local packets. - */ - if (!nf_nat_initialized(ct, maniptype)) { - unsigned int ret; - - ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct); - if (ret != NF_ACCEPT) - return ret; - } else { - pr_debug("Already setup manip %s for ct %p\n", - maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", - ct); - if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) - goto oif_changed; - } - break; - - default: - /* ESTABLISHED */ - NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || - ctinfo == IP_CT_ESTABLISHED_REPLY); - if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) - goto oif_changed; - } - - return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); - -oif_changed: - nf_ct_kill_acct(ct, ctinfo, skb); - return NF_DROP; + return nf_nat_ipv6_fn(ops, skb, in, out, ip6table_nat_do_chain); } -static unsigned int -nf_nat_ipv6_in(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - unsigned int ret; - struct in6_addr daddr = ipv6_hdr(skb)->daddr; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) - skb_dst_drop(skb); - - return ret; + return nf_nat_ipv6_in(ops, skb, in, out, ip6table_nat_do_chain); } -static unsigned int -nf_nat_ipv6_out(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { -#ifdef CONFIG_XFRM - const struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - int err; -#endif - unsigned int ret; - - /* root is playing with raw sockets. 
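
The shape of the refactor in this hunk: the per-hook bodies in ip6table_nat.c duplicated logic that nft_chain_nat_ipv6.c needs too, so the shared conntrack/NAT handling moves into nf_nat_ipv6_*() helpers parameterized by a do_chain callback, and each frontend keeps only thin wrappers. Reduced to its skeleton, with invented types standing in for the kernel's:

#include <stdio.h>

enum verdict { ACCEPT, DROP };

/* Common per-hook logic, parameterized by the ruleset walker. */
static enum verdict nat_fn(int hooknum, void *pkt,
			   enum verdict (*do_chain)(int hooknum, void *pkt))
{
	/* ... shared conntrack/NAT bookkeeping would live here ... */
	return do_chain(hooknum, pkt);
}

/* One frontend runs an ip6tables-style table... */
static enum verdict run_table(int hooknum, void *pkt)
{
	printf("ip6table chain at hook %d\n", hooknum);
	return ACCEPT;
}

/* ...the other runs an nf_tables chain. */
static enum verdict run_nft_chain(int hooknum, void *pkt)
{
	printf("nft chain at hook %d\n", hooknum);
	return ACCEPT;
}

static enum verdict ip6table_nat_fn(int hooknum, void *pkt)
{
	return nat_fn(hooknum, pkt, run_table);
}

static enum verdict nft_nat_ipv6_fn(int hooknum, void *pkt)
{
	return nat_fn(hooknum, pkt, run_nft_chain);
}

int main(void)
{
	ip6table_nat_fn(0, NULL);
	nft_nat_ipv6_fn(0, NULL);
	return 0;
}
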
*/ - if (skb->len < sizeof(struct ipv6hdr)) - return NF_ACCEPT; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); -#ifdef CONFIG_XFRM - if (ret != NF_DROP && ret != NF_STOLEN && - !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, - &ct->tuplehash[!dir].tuple.dst.u3) || - (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && - ct->tuplehash[dir].tuple.src.u.all != - ct->tuplehash[!dir].tuple.dst.u.all)) { - err = nf_xfrm_me_harder(skb, AF_INET6); - if (err < 0) - ret = NF_DROP_ERR(err); - } - } -#endif - return ret; + return nf_nat_ipv6_out(ops, skb, in, out, ip6table_nat_do_chain); } -static unsigned int -nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - const struct nf_conn *ct; - enum ip_conntrack_info ctinfo; - unsigned int ret; - int err; - - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct ipv6hdr)) - return NF_ACCEPT; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, - &ct->tuplehash[!dir].tuple.src.u3)) { - err = ip6_route_me_harder(skb); - if (err < 0) - ret = NF_DROP_ERR(err); - } -#ifdef CONFIG_XFRM - else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && - ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && - ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[!dir].tuple.src.u.all) { - err = nf_xfrm_me_harder(skb, AF_INET6); - if (err < 0) - ret = NF_DROP_ERR(err); - } -#endif - } - return ret; + return nf_nat_ipv6_local_fn(ops, skb, in, out, ip6table_nat_do_chain); } static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { /* Before packet filtering, change destination */ { - .hook = nf_nat_ipv6_in, + .hook = ip6table_nat_in, .owner = THIS_MODULE, .pf = NFPROTO_IPV6, .hooknum = NF_INET_PRE_ROUTING, @@ -253,7 +88,7 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { }, /* After packet filtering, change source */ { - .hook = nf_nat_ipv6_out, + .hook = ip6table_nat_out, .owner = THIS_MODULE, .pf = NFPROTO_IPV6, .hooknum = NF_INET_POST_ROUTING, @@ -261,7 +96,7 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { }, /* Before packet filtering, change destination */ { - .hook = nf_nat_ipv6_local_fn, + .hook = ip6table_nat_local_fn, .owner = THIS_MODULE, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_OUT, @@ -269,7 +104,7 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { }, /* After packet filtering, change source */ { - .hook = nf_nat_ipv6_fn, + .hook = ip6table_nat_fn, .owner = THIS_MODULE, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_IN, diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index fc8e49b2ff3e..c5812e1c1ffb 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c @@ -261,6 +261,205 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, } EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation); +unsigned int +nf_nat_ipv6_fn(const struct 
nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + struct nf_conn_nat *nat; + enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); + __be16 frag_off; + int hdrlen; + u8 nexthdr; + + ct = nf_ct_get(skb, &ctinfo); + /* Can't track? It's not due to stress, or conntrack would + * have dropped it. Hence it's the user's responsibilty to + * packet filter it out, or implement conntrack/NAT for that + * protocol. 8) --RR + */ + if (!ct) + return NF_ACCEPT; + + /* Don't try to NAT if this packet is not conntracked */ + if (nf_ct_is_untracked(ct)) + return NF_ACCEPT; + + nat = nf_ct_nat_ext_add(ct); + if (nat == NULL) + return NF_ACCEPT; + + switch (ctinfo) { + case IP_CT_RELATED: + case IP_CT_RELATED_REPLY: + nexthdr = ipv6_hdr(skb)->nexthdr; + hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), + &nexthdr, &frag_off); + + if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) { + if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo, + ops->hooknum, + hdrlen)) + return NF_DROP; + else + return NF_ACCEPT; + } + /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */ + case IP_CT_NEW: + /* Seen it before? This can happen for loopback, retrans, + * or local packets. + */ + if (!nf_nat_initialized(ct, maniptype)) { + unsigned int ret; + + ret = do_chain(ops, skb, in, out, ct); + if (ret != NF_ACCEPT) + return ret; + + if (nf_nat_initialized(ct, HOOK2MANIP(ops->hooknum))) + break; + + ret = nf_nat_alloc_null_binding(ct, ops->hooknum); + if (ret != NF_ACCEPT) + return ret; + } else { + pr_debug("Already setup manip %s for ct %p\n", + maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST", + ct); + if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) + goto oif_changed; + } + break; + + default: + /* ESTABLISHED */ + NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || + ctinfo == IP_CT_ESTABLISHED_REPLY); + if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out)) + goto oif_changed; + } + + return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); + +oif_changed: + nf_ct_kill_acct(ct, ctinfo, skb); + return NF_DROP; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn); + +unsigned int +nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + unsigned int ret; + struct in6_addr daddr = ipv6_hdr(skb)->daddr; + + ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); + if (ret != NF_DROP && ret != NF_STOLEN && + ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) + skb_dst_drop(skb); + + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv6_in); + +unsigned int +nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ +#ifdef CONFIG_XFRM + const struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + int err; +#endif + unsigned int ret; + + /* root is playing with raw sockets. 
*/ + if (skb->len < sizeof(struct ipv6hdr)) + return NF_ACCEPT; + + ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); +#ifdef CONFIG_XFRM + if (ret != NF_DROP && ret != NF_STOLEN && + !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && + (ct = nf_ct_get(skb, &ctinfo)) != NULL) { + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + + if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, + &ct->tuplehash[!dir].tuple.dst.u3) || + (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && + ct->tuplehash[dir].tuple.src.u.all != + ct->tuplehash[!dir].tuple.dst.u.all)) { + err = nf_xfrm_me_harder(skb, AF_INET6); + if (err < 0) + ret = NF_DROP_ERR(err); + } + } +#endif + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv6_out); + +unsigned int +nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb, + const struct net_device *in, const struct net_device *out, + unsigned int (*do_chain)(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct)) +{ + const struct nf_conn *ct; + enum ip_conntrack_info ctinfo; + unsigned int ret; + int err; + + /* root is playing with raw sockets. */ + if (skb->len < sizeof(struct ipv6hdr)) + return NF_ACCEPT; + + ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain); + if (ret != NF_DROP && ret != NF_STOLEN && + (ct = nf_ct_get(skb, &ctinfo)) != NULL) { + enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + + if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, + &ct->tuplehash[!dir].tuple.src.u3)) { + err = ip6_route_me_harder(skb); + if (err < 0) + ret = NF_DROP_ERR(err); + } +#ifdef CONFIG_XFRM + else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && + ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && + ct->tuplehash[dir].tuple.dst.u.all != + ct->tuplehash[!dir].tuple.src.u.all) { + err = nf_xfrm_me_harder(skb, AF_INET6); + if (err < 0) + ret = NF_DROP_ERR(err); + } +#endif + } + return ret; +} +EXPORT_SYMBOL_GPL(nf_nat_ipv6_local_fn); + static int __init nf_nat_l3proto_ipv6_init(void) { int err; diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c new file mode 100644 index 000000000000..7745609665cd --- /dev/null +++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2011 Patrick McHardy <kaber@trash.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on Rusty Russell's IPv6 MASQUERADE target. Development of IPv6 + * NAT funded by Astaro. 
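
Context for the notifier code in this new file: masqueraded connections must not outlive the interface whose address they borrowed, so on NETDEV_DOWN every conntrack entry whose masq_index matches the dying device is killed via nf_ct_iterate_cleanup() with the device_cmp() predicate. The predicate-driven sweep in isolation, as a plain C sketch with invented types:

#include <stddef.h>

struct conn {
	int masq_ifindex;
	int dead;
};

/* Walk all entries and mark those the predicate selects,
 * the way nf_ct_iterate_cleanup() kills matching conntracks. */
static void evict_matching(struct conn *tbl, size_t n,
			   int (*match)(const struct conn *, void *arg),
			   void *arg)
{
	for (size_t i = 0; i < n; i++)
		if (match(&tbl[i], arg))
			tbl[i].dead = 1;
}

static int device_cmp(const struct conn *c, void *arg)
{
	return c->masq_ifindex == *(int *)arg;
}

int main(void)
{
	struct conn tbl[] = { { 2, 0 }, { 3, 0 }, { 2, 0 } };
	int downed_ifindex = 2;

	/* what the NETDEV_DOWN notifier triggers for the downed device */
	evict_matching(tbl, 3, device_cmp, &downed_ifindex);
	return 0;
}
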
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/atomic.h> +#include <linux/netdevice.h> +#include <linux/ipv6.h> +#include <linux/netfilter.h> +#include <linux/netfilter_ipv6.h> +#include <net/netfilter/nf_nat.h> +#include <net/addrconf.h> +#include <net/ipv6.h> +#include <net/netfilter/ipv6/nf_nat_masquerade.h> + +unsigned int +nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range, + const struct net_device *out) +{ + enum ip_conntrack_info ctinfo; + struct in6_addr src; + struct nf_conn *ct; + struct nf_nat_range newrange; + + ct = nf_ct_get(skb, &ctinfo); + NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || + ctinfo == IP_CT_RELATED_REPLY)); + + if (ipv6_dev_get_saddr(dev_net(out), out, + &ipv6_hdr(skb)->daddr, 0, &src) < 0) + return NF_DROP; + + nfct_nat(ct)->masq_index = out->ifindex; + + newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS; + newrange.min_addr.in6 = src; + newrange.max_addr.in6 = src; + newrange.min_proto = range->min_proto; + newrange.max_proto = range->max_proto; + + return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6); + +static int device_cmp(struct nf_conn *ct, void *ifindex) +{ + const struct nf_conn_nat *nat = nfct_nat(ct); + + if (!nat) + return 0; + if (nf_ct_l3num(ct) != NFPROTO_IPV6) + return 0; + return nat->masq_index == (int)(long)ifindex; +} + +static int masq_device_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + const struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct net *net = dev_net(dev); + + if (event == NETDEV_DOWN) + nf_ct_iterate_cleanup(net, device_cmp, + (void *)(long)dev->ifindex, 0, 0); + + return NOTIFY_DONE; +} + +static struct notifier_block masq_dev_notifier = { + .notifier_call = masq_device_event, +}; + +static int masq_inet_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *ifa = ptr; + struct netdev_notifier_info info; + + netdev_notifier_info_init(&info, ifa->idev->dev); + return masq_device_event(this, event, &info); +} + +static struct notifier_block masq_inet_notifier = { + .notifier_call = masq_inet_event, +}; + +static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0); + +void nf_nat_masquerade_ipv6_register_notifier(void) +{ + /* check if the notifier is already set */ + if (atomic_inc_return(&masquerade_notifier_refcount) > 1) + return; + + register_netdevice_notifier(&masq_dev_notifier); + register_inet6addr_notifier(&masq_inet_notifier); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier); + +void nf_nat_masquerade_ipv6_unregister_notifier(void) +{ + /* check if the notifier still has clients */ + if (atomic_dec_return(&masquerade_notifier_refcount) > 0) + return; + + unregister_inet6addr_notifier(&masq_inet_notifier); + unregister_netdevice_notifier(&masq_dev_notifier); +} +EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c index d189fcb437fe..1c4b75dd425b 100644 --- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c +++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c @@ -24,144 +24,53 @@ #include <net/netfilter/nf_nat_l3proto.h> #include <net/ipv6.h> -/* - * IPv6 NAT chains - */ - -static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device 
*out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + struct nf_conn *ct) { - enum ip_conntrack_info ctinfo; - struct nf_conn *ct = nf_ct_get(skb, &ctinfo); - struct nf_conn_nat *nat; - enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum); - __be16 frag_off; - int hdrlen; - u8 nexthdr; struct nft_pktinfo pkt; - unsigned int ret; - - if (ct == NULL || nf_ct_is_untracked(ct)) - return NF_ACCEPT; - - nat = nf_ct_nat_ext_add(ct); - if (nat == NULL) - return NF_ACCEPT; - - switch (ctinfo) { - case IP_CT_RELATED: - case IP_CT_RELATED + IP_CT_IS_REPLY: - nexthdr = ipv6_hdr(skb)->nexthdr; - hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), - &nexthdr, &frag_off); - - if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) { - if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo, - ops->hooknum, - hdrlen)) - return NF_DROP; - else - return NF_ACCEPT; - } - /* Fall through */ - case IP_CT_NEW: - if (nf_nat_initialized(ct, maniptype)) - break; - - nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out); - ret = nft_do_chain(&pkt, ops); - if (ret != NF_ACCEPT) - return ret; - if (!nf_nat_initialized(ct, maniptype)) { - ret = nf_nat_alloc_null_binding(ct, ops->hooknum); - if (ret != NF_ACCEPT) - return ret; - } - default: - break; - } + nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out); - return nf_nat_packet(ct, ctinfo, ops->hooknum, skb); + return nft_do_chain(&pkt, ops); } -static unsigned int nf_nat_ipv6_prerouting(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - struct in6_addr daddr = ipv6_hdr(skb)->daddr; - unsigned int ret; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) - skb_dst_drop(skb); - - return ret; + return nf_nat_ipv6_fn(ops, skb, in, out, nft_nat_do_chain); } -static unsigned int nf_nat_ipv6_postrouting(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - enum ip_conntrack_info ctinfo __maybe_unused; - const struct nf_conn *ct __maybe_unused; - unsigned int ret; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); -#ifdef CONFIG_XFRM - if (ret != NF_DROP && ret != NF_STOLEN && - !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, - &ct->tuplehash[!dir].tuple.dst.u3) || - (ct->tuplehash[dir].tuple.src.u.all != - ct->tuplehash[!dir].tuple.dst.u.all)) - if (nf_xfrm_me_harder(skb, AF_INET6) < 0) - ret = NF_DROP; - } -#endif - return ret; + return nf_nat_ipv6_in(ops, skb, in, out, nft_nat_do_chain); } -static unsigned int nf_nat_ipv6_output(const struct nf_hook_ops *ops, - struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - int (*okfn)(struct sk_buff *)) +static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops 
*ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) { - enum ip_conntrack_info ctinfo; - const struct nf_conn *ct; - unsigned int ret; - - ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn); - if (ret != NF_DROP && ret != NF_STOLEN && - (ct = nf_ct_get(skb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); + return nf_nat_ipv6_out(ops, skb, in, out, nft_nat_do_chain); +} - if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, - &ct->tuplehash[!dir].tuple.src.u3)) { - if (ip6_route_me_harder(skb)) - ret = NF_DROP; - } -#ifdef CONFIG_XFRM - else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && - ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[!dir].tuple.src.u.all) - if (nf_xfrm_me_harder(skb, AF_INET6)) - ret = NF_DROP; -#endif - } - return ret; +static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops, + struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + int (*okfn)(struct sk_buff *)) +{ + return nf_nat_ipv6_local_fn(ops, skb, in, out, nft_nat_do_chain); } static const struct nf_chain_type nft_chain_nat_ipv6 = { @@ -174,10 +83,10 @@ static const struct nf_chain_type nft_chain_nat_ipv6 = { (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_LOCAL_IN), .hooks = { - [NF_INET_PRE_ROUTING] = nf_nat_ipv6_prerouting, - [NF_INET_POST_ROUTING] = nf_nat_ipv6_postrouting, - [NF_INET_LOCAL_OUT] = nf_nat_ipv6_output, - [NF_INET_LOCAL_IN] = nf_nat_ipv6_fn, + [NF_INET_PRE_ROUTING] = nft_nat_ipv6_in, + [NF_INET_POST_ROUTING] = nft_nat_ipv6_out, + [NF_INET_LOCAL_OUT] = nft_nat_ipv6_local_fn, + [NF_INET_LOCAL_IN] = nft_nat_ipv6_fn, }, }; diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c new file mode 100644 index 000000000000..4e51334ef6b7 --- /dev/null +++ b/net/ipv6/netfilter/nft_masq_ipv6.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
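
Both users of the shared masquerade core, the xt MASQUERADE target and the nft masq expression in this new file, can be loaded at the same time, which is why the register/unregister helpers above guard the global notifiers with an atomic refcount: only the 0-to-1 transition registers and only the 1-to-0 transition unregisters. The same discipline in isolation, with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount;

static void backend_register(void)   { puts("registered"); }
static void backend_unregister(void) { puts("unregistered"); }

void helper_register(void)
{
	/* only the first user actually registers */
	if (atomic_fetch_add(&refcount, 1) == 0)
		backend_register();
}

void helper_unregister(void)
{
	/* only the last user actually unregisters */
	if (atomic_fetch_sub(&refcount, 1) == 1)
		backend_unregister();
}

int main(void)
{
	helper_register();	/* xt target loads   -> "registered" */
	helper_register();	/* nft expr loads    -> no-op */
	helper_unregister();	/* nft expr unloads  -> no-op */
	helper_unregister();	/* xt target unloads -> "unregistered" */
	return 0;
}
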
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netlink.h> +#include <linux/netfilter.h> +#include <linux/netfilter/nf_tables.h> +#include <net/netfilter/nf_tables.h> +#include <net/netfilter/nf_nat.h> +#include <net/netfilter/nft_masq.h> +#include <net/netfilter/ipv6/nf_nat_masquerade.h> + +static void nft_masq_ipv6_eval(const struct nft_expr *expr, + struct nft_data data[NFT_REG_MAX + 1], + const struct nft_pktinfo *pkt) +{ + struct nft_masq *priv = nft_expr_priv(expr); + struct nf_nat_range range; + unsigned int verdict; + + memset(&range, 0, sizeof(range)); + range.flags = priv->flags; + verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); + + data[NFT_REG_VERDICT].verdict = verdict; +} + +static int nft_masq_ipv6_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + int err; + + err = nft_masq_init(ctx, expr, tb); + if (err < 0) + return err; + + nf_nat_masquerade_ipv6_register_notifier(); + return 0; +} + +static void nft_masq_ipv6_destroy(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + nf_nat_masquerade_ipv6_unregister_notifier(); +} + +static struct nft_expr_type nft_masq_ipv6_type; +static const struct nft_expr_ops nft_masq_ipv6_ops = { + .type = &nft_masq_ipv6_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)), + .eval = nft_masq_ipv6_eval, + .init = nft_masq_ipv6_init, + .destroy = nft_masq_ipv6_destroy, + .dump = nft_masq_dump, +}; + +static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { + .family = NFPROTO_IPV6, + .name = "masq", + .ops = &nft_masq_ipv6_ops, + .policy = nft_masq_policy, + .maxattr = NFTA_MASQ_MAX, + .owner = THIS_MODULE, +}; + +static int __init nft_masq_ipv6_module_init(void) +{ + return nft_register_expr(&nft_masq_ipv6_type); +} + +static void __exit nft_masq_ipv6_module_exit(void) +{ + nft_unregister_expr(&nft_masq_ipv6_type); +} + +module_init(nft_masq_ipv6_module_init); +module_exit(nft_masq_ipv6_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); +MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "masq"); diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 5ec867e4a8b7..fc24c390af05 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -35,7 +35,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) if (found_rhdr) return offset; break; - default : + default: return offset; } diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 2d6f860e5c1e..1752cd0b4882 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -8,7 +8,7 @@ * except it reports the sockets in the INET6 address family. * * Authors: David S. 
Miller (davem@caip.rutgers.edu) - * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> + * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c index e048cf1bb6a2..e3770abe688a 100644 --- a/net/ipv6/protocol.c +++ b/net/ipv6/protocol.c @@ -51,6 +51,7 @@ EXPORT_SYMBOL(inet6_del_protocol); #endif const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS] __read_mostly; +EXPORT_SYMBOL(inet6_offloads); int inet6_add_offload(const struct net_offload *prot, unsigned char protocol) { diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 39d44226e402..896af8807979 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -889,7 +889,7 @@ back_from_confirm: else { lock_sock(sk); err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, - len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info*)dst, + len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, msg->msg_flags, dontfrag); if (err) @@ -902,7 +902,7 @@ done: dst_release(dst); out: fl6_sock_release(flowlabel); - return err<0?err:len; + return err < 0 ? err : len; do_confirm: dst_confirm(dst); if (!(msg->msg_flags & MSG_PROBE) || len) @@ -1045,7 +1045,7 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, struct raw6_sock *rp = raw6_sk(sk); int val, len; - if (get_user(len,optlen)) + if (get_user(len, optlen)) return -EFAULT; switch (optname) { @@ -1069,7 +1069,7 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, if (put_user(len, optlen)) return -EFAULT; - if (copy_to_user(optval,&val,len)) + if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index c6557d9f7808..1a157ca2ebc1 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -62,13 +62,12 @@ static const char ip6_frag_cache_name[] = "ip6-frags"; -struct ip6frag_skb_cb -{ +struct ip6frag_skb_cb { struct inet6_skb_parm h; int offset; }; -#define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb)) +#define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb)) static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) { @@ -289,7 +288,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, goto found; } prev = NULL; - for(next = fq->q.fragments; next != NULL; next = next->next) { + for (next = fq->q.fragments; next != NULL; next = next->next) { if (FRAG6_CB(next)->offset >= offset) break; /* bingo! */ prev = next; @@ -529,7 +528,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); /* Jumbo payload inhibits frag. 
header */ - if (hdr->payload_len==0) + if (hdr->payload_len == 0) goto fail_hdr; if (!pskb_may_pull(skb, (skb_transport_offset(skb) + @@ -575,8 +574,7 @@ fail_hdr: return -1; } -static const struct inet6_protocol frag_protocol = -{ +static const struct inet6_protocol frag_protocol = { .handler = ipv6_frag_rcv, .flags = INET6_PROTO_NOPOLICY, }; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f23fbd28a501..f74b0417bd60 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -813,7 +813,7 @@ out: } -struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6, +struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, int flags) { return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup); @@ -843,7 +843,6 @@ struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, return NULL; } - EXPORT_SYMBOL(rt6_lookup); /* ip6_ins_rt is called with FREE table->tb6_lock. @@ -1024,7 +1023,7 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags); } -struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk, +struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, struct flowi6 *fl6) { int flags = 0; @@ -1041,7 +1040,6 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk, return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output); } - EXPORT_SYMBOL(ip6_route_output); struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig) @@ -1149,7 +1147,7 @@ static void ip6_link_failure(struct sk_buff *skb) static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { - struct rt6_info *rt6 = (struct rt6_info*)dst; + struct rt6_info *rt6 = (struct rt6_info *)dst; dst_confirm(dst); if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) { @@ -1924,7 +1922,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net, return NULL; read_lock_bh(&table->tb6_lock); - fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0); + fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0); if (!fn) goto out; @@ -1983,7 +1981,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev return NULL; read_lock_bh(&table->tb6_lock); - for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) { + for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) { if (dev == rt->dst.dev && ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && ipv6_addr_equal(&rt->rt6i_gateway, addr)) @@ -2068,7 +2066,7 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) struct in6_rtmsg rtmsg; int err; - switch(cmd) { + switch (cmd) { case SIOCADDRT: /* Add a route */ case SIOCDELRT: /* Delete a route */ if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) @@ -2191,7 +2189,7 @@ int ip6_route_get_saddr(struct net *net, unsigned int prefs, struct in6_addr *saddr) { - struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt); + struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); int err = 0; if (rt->rt6i_prefsrc.plen) *saddr = rt->rt6i_prefsrc.addr; @@ -2486,7 +2484,7 @@ beginning: return last_err; } -static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh) +static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) { struct fib6_config cfg; int err; @@ -2501,7 +2499,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh) return 
ip6_route_del(&cfg); } -static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh) +static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) { struct fib6_config cfg; int err; @@ -2693,7 +2691,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg) prefix, 0, NLM_F_MULTI); } -static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh) +static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[RTA_MAX+1]; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 6163f851dc01..db75809ab843 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -812,9 +812,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, const struct ipv6hdr *iph6 = ipv6_hdr(skb); u8 tos = tunnel->parms.iph.tos; __be16 df = tiph->frag_off; - struct rtable *rt; /* Route to the other host */ - struct net_device *tdev; /* Device to other host */ - unsigned int max_headroom; /* The extra header space needed */ + struct rtable *rt; /* Route to the other host */ + struct net_device *tdev; /* Device to other host */ + unsigned int max_headroom; /* The extra header space needed */ __be32 dst = tiph->daddr; struct flowi4 fl4; int mtu; @@ -822,6 +822,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, int addr_type; u8 ttl; int err; + u8 protocol = IPPROTO_IPV6; + int t_hlen = tunnel->hlen + sizeof(struct iphdr); if (skb->protocol != htons(ETH_P_IPV6)) goto tx_error; @@ -911,8 +913,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, goto tx_error; } + skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); + if (IS_ERR(skb)) { + ip_rt_put(rt); + goto out; + } + if (df) { - mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr); + mtu = dst_mtu(&rt->dst) - t_hlen; if (mtu < 68) { dev->stats.collisions++; @@ -947,7 +955,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, /* * Okay, now see if we can stuff it in the buffer as-is. 
*/ - max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr); + max_headroom = LL_RESERVED_SPACE(tdev) + t_hlen; if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { @@ -969,14 +977,13 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ttl = iph6->hop_limit; tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); - skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); - if (IS_ERR(skb)) { + if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) { ip_rt_put(rt); - goto out; + goto tx_error; } err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr, - IPPROTO_IPV6, tos, ttl, df, + protocol, tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return NETDEV_TX_OK; @@ -1059,8 +1066,10 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link); if (tdev) { + int t_hlen = tunnel->hlen + sizeof(struct iphdr); + dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr); - dev->mtu = tdev->mtu - sizeof(struct iphdr); + dev->mtu = tdev->mtu - t_hlen; if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; } @@ -1123,7 +1132,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t, #endif static int -ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) +ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int err = 0; struct ip_tunnel_parm p; @@ -1307,7 +1316,10 @@ done: static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) { - if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr)) + struct ip_tunnel *tunnel = netdev_priv(dev); + int t_hlen = tunnel->hlen + sizeof(struct iphdr); + + if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - t_hlen) return -EINVAL; dev->mtu = new_mtu; return 0; @@ -1338,12 +1350,15 @@ static void ipip6_dev_free(struct net_device *dev) static void ipip6_tunnel_setup(struct net_device *dev) { + struct ip_tunnel *tunnel = netdev_priv(dev); + int t_hlen = tunnel->hlen + sizeof(struct iphdr); + dev->netdev_ops = &ipip6_netdev_ops; - dev->destructor = ipip6_dev_free; + dev->destructor = ipip6_dev_free; dev->type = ARPHRD_SIT; - dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr); - dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr); + dev->hard_header_len = LL_MAX_HEADER + t_hlen; + dev->mtu = ETH_DATA_LEN - t_hlen; dev->flags = IFF_NOARP; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; dev->iflink = 0; @@ -1466,6 +1481,40 @@ static void ipip6_netlink_parms(struct nlattr *data[], } +/* This function returns true when ENCAP attributes are present in the nl msg */ +static bool ipip6_netlink_encap_parms(struct nlattr *data[], + struct ip_tunnel_encap *ipencap) +{ + bool ret = false; + + memset(ipencap, 0, sizeof(*ipencap)); + + if (!data) + return ret; + + if (data[IFLA_IPTUN_ENCAP_TYPE]) { + ret = true; + ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]); + } + + if (data[IFLA_IPTUN_ENCAP_FLAGS]) { + ret = true; + ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]); + } + + if (data[IFLA_IPTUN_ENCAP_SPORT]) { + ret = true; + ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]); + } + + if (data[IFLA_IPTUN_ENCAP_DPORT]) { + ret = true; + ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]); + } + + return ret; +} + #ifdef CONFIG_IPV6_SIT_6RD /* This function returns true when 6RD attributes are present in the nl msg */ static bool ipip6_netlink_6rd_parms(struct nlattr *data[], @@ -1509,12 
+1558,20 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev, { struct net *net = dev_net(dev); struct ip_tunnel *nt; + struct ip_tunnel_encap ipencap; #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif int err; nt = netdev_priv(dev); + + if (ipip6_netlink_encap_parms(data, &ipencap)) { + err = ip_tunnel_encap_setup(nt, &ipencap); + if (err < 0) + return err; + } + ipip6_netlink_parms(data, &nt->parms); if (ipip6_tunnel_locate(net, &nt->parms, 0)) @@ -1537,15 +1594,23 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[], { struct ip_tunnel *t = netdev_priv(dev); struct ip_tunnel_parm p; + struct ip_tunnel_encap ipencap; struct net *net = t->net; struct sit_net *sitn = net_generic(net, sit_net_id); #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif + int err; if (dev == sitn->fb_tunnel_dev) return -EINVAL; + if (ipip6_netlink_encap_parms(data, &ipencap)) { + err = ip_tunnel_encap_setup(t, &ipencap); + if (err < 0) + return err; + } + ipip6_netlink_parms(data, &p); if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) || @@ -1599,6 +1664,14 @@ static size_t ipip6_get_size(const struct net_device *dev) /* IFLA_IPTUN_6RD_RELAY_PREFIXLEN */ nla_total_size(2) + #endif + /* IFLA_IPTUN_ENCAP_TYPE */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_FLAGS */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_SPORT */ + nla_total_size(2) + + /* IFLA_IPTUN_ENCAP_DPORT */ + nla_total_size(2) + 0; } @@ -1630,6 +1703,16 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; #endif + if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, + tunnel->encap.type) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT, + tunnel->encap.sport) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, + tunnel->encap.dport) || + nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, + tunnel->encap.flags)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -1651,6 +1734,10 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = { [IFLA_IPTUN_6RD_PREFIXLEN] = { .type = NLA_U16 }, [IFLA_IPTUN_6RD_RELAY_PREFIXLEN] = { .type = NLA_U16 }, #endif + [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 }, + [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 }, }; static void ipip6_dellink(struct net_device *dev, struct list_head *head) diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 83cea1d39466..c643dc907ce7 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -24,7 +24,7 @@ #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) -static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS]; +static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; /* RFC 2460, Section 8.3: * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
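
The sit.c hunks above fold any configured encapsulation header into a single t_hlen = tunnel->hlen + sizeof(struct iphdr), so every MTU and headroom computation shrinks by the extra encap bytes as well as by the outer IPv4 header. A minimal standalone sketch of that arithmetic, assuming a hypothetical FOU-style UDP encapsulation of 8 bytes; the constants mirror the kernel values, but this is an illustration of the bookkeeping, not the kernel code itself:

#include <stdio.h>

#define ETH_DATA_LEN	1500	/* default Ethernet payload size */
#define IPV6_MIN_MTU	1280	/* RFC 2460 lower bound */

int main(void)
{
	int encap_hlen = 8;			/* hypothetical UDP (FOU-style) encap header */
	int iph_len = 20;			/* sizeof(struct iphdr) */
	int t_hlen = encap_hlen + iph_len;	/* tunnel->hlen + sizeof(struct iphdr) */
	int mtu = ETH_DATA_LEN - t_hlen;	/* as in ipip6_tunnel_setup() */

	if (mtu < IPV6_MIN_MTU)			/* clamp, as in ipip6_tunnel_bind_dev() */
		mtu = IPV6_MIN_MTU;

	printf("t_hlen=%d mtu=%d\n", t_hlen, mtu);	/* prints: t_hlen=28 mtu=1472 */
	return 0;
}

With no encapsulation configured (tunnel->hlen == 0) the same formula degenerates to ETH_DATA_LEN - sizeof(struct iphdr) = 1480, which is why plain sit tunnels keep their historical default MTU.
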
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 0c56c93619e0..c5c10fafcfe2 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -16,6 +16,8 @@ #include <net/addrconf.h> #include <net/inet_frag.h> +static int one = 1; + static struct ctl_table ipv6_table_template[] = { { .procname = "bindv6only", @@ -63,6 +65,14 @@ static struct ctl_table ipv6_rotable[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "mld_qrv", + .data = &sysctl_mld_qrv, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one + }, { } }; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 29964c3d363c..de51a88bec6f 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -93,13 +93,16 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - const struct rt6_info *rt = (const struct rt6_info *)dst; - dst_hold(dst); - sk->sk_rx_dst = dst; - inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; - if (rt->rt6i_node) - inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; + if (dst) { + const struct rt6_info *rt = (const struct rt6_info *)dst; + + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + if (rt->rt6i_node) + inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; + } } static void tcp_v6_hash(struct sock *sk) @@ -738,7 +741,7 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk, ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) ireq->ir_iif = inet6_iif(skb); - if (!TCP_SKB_CB(skb)->when && + if (!TCP_SKB_CB(skb)->tcp_tw_isn && (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim || np->repflow)) { @@ -1412,7 +1415,8 @@ static int tcp_v6_rcv(struct sk_buff *skb) TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff*4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); - TCP_SKB_CB(skb)->when = 0; + TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); + TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); TCP_SKB_CB(skb)->sacked = 0; diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index 01b0ff9a0c2c..dbb3d9262bf6 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -35,34 +35,14 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb) static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb) { - const struct ipv6hdr *iph = skb_gro_network_header(skb); - __wsum wsum; - /* Don't bother verifying checksum if we're going to flush anyway. 
*/ - if (NAPI_GRO_CB(skb)->flush) - goto skip_csum; - - wsum = NAPI_GRO_CB(skb)->csum; - - switch (skb->ip_summed) { - case CHECKSUM_NONE: - wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), - wsum); - - /* fall through */ - - case CHECKSUM_COMPLETE: - if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr, - wsum)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } - + if (!NAPI_GRO_CB(skb)->flush && + skb_gro_checksum_validate(skb, IPPROTO_TCP, + ip6_gro_compute_pseudo)) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } -skip_csum: return tcp_gro_receive(head, skb); } diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index 2c4e4c5c7614..3c758007b327 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c @@ -15,7 +15,7 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Authors Mitsuru KANDA <mk@linux-ipv6.org> - * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> + * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> */ #define pr_fmt(fmt) "IPv6: " fmt @@ -64,7 +64,6 @@ err: return ret; } - EXPORT_SYMBOL(xfrm6_tunnel_register); int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family) @@ -92,7 +91,6 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family) return ret; } - EXPORT_SYMBOL(xfrm6_tunnel_deregister); #define for_each_tunnel_rcu(head, handler) \ diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4836af8f582d..f6ba535b6feb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -243,7 +243,7 @@ begin: goto exact_match; } else if (score == badness && reuseport) { matches++; - if (((u64)hash * matches) >> 32 == 0) + if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } @@ -323,7 +323,7 @@ begin: } } else if (score == badness && reuseport) { matches++; - if (((u64)hash * matches) >> 32 == 0) + if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } @@ -373,8 +373,8 @@ EXPORT_SYMBOL_GPL(udp6_lib_lookup); /* - * This should be easy, if there is something there we - * return it, otherwise we block. + * This should be easy, if there is something there we + * return it, otherwise we block. 
*/ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, @@ -530,7 +530,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct in6_addr *saddr = &hdr->saddr; const struct in6_addr *daddr = &hdr->daddr; - struct udphdr *uh = (struct udphdr*)(skb->data+offset); + struct udphdr *uh = (struct udphdr *)(skb->data+offset); struct sock *sk; int err; struct net *net = dev_net(skb->dev); @@ -596,7 +596,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) static __inline__ void udpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, - u8 code, int offset, __be32 info ) + u8 code, int offset, __be32 info) { __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); } @@ -891,6 +891,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, goto csum_error; } + if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk)) + skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + ip6_compute_pseudo); + ret = udpv6_queue_rcv_skb(sk, skb); sock_put(sk); @@ -960,10 +964,10 @@ static void udp_v6_flush_pending_frames(struct sock *sk) } /** - * udp6_hwcsum_outgoing - handle outgoing HW checksumming - * @sk: socket we are sending on - * @skb: sk_buff containing the filled-in UDP header - * (checksum field must be zeroed out) + * udp6_hwcsum_outgoing - handle outgoing HW checksumming + * @sk: socket we are sending on + * @skb: sk_buff containing the filled-in UDP header + * (checksum field must be zeroed out) */ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, const struct in6_addr *saddr, @@ -1294,7 +1298,7 @@ do_append_data: getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, - (struct rt6_info*)dst, + (struct rt6_info *)dst, corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag); if (err) udp_v6_flush_pending_frames(sk); diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 0ae3d98f83e0..de85f809bf29 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -10,6 +10,7 @@ * UDPv6 GSO support */ #include <linux/skbuff.h> +#include <linux/netdevice.h> #include <net/protocol.h> #include <net/ipv6.h> #include <net/udp.h> @@ -127,10 +128,52 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, out: return segs; } + +static struct sk_buff **udp6_gro_receive(struct sk_buff **head, + struct sk_buff *skb) +{ + struct udphdr *uh = udp_gro_udphdr(skb); + + if (unlikely(!uh)) + goto flush; + + /* Don't bother verifying checksum if we're going to flush anyway. 
*/ + if (NAPI_GRO_CB(skb)->flush) + goto skip; + + if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check, + ip6_gro_compute_pseudo)) + goto flush; + else if (uh->check) + skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check, + ip6_gro_compute_pseudo); + +skip: + return udp_gro_receive(head, skb, uh); + +flush: + NAPI_GRO_CB(skb)->flush = 1; + return NULL; +} + +static int udp6_gro_complete(struct sk_buff *skb, int nhoff) +{ + const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); + + if (uh->check) + uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr, + &ipv6h->daddr, 0); + + return udp_gro_complete(skb, nhoff); +} + static const struct net_offload udpv6_offload = { .callbacks = { .gso_send_check = udp6_ufo_send_check, .gso_segment = udp6_ufo_fragment, + .gro_receive = udp6_gro_receive, + .gro_complete = udp6_gro_complete, }, }; diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index f8c3cf842f53..f48fbe4d16f5 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -3,8 +3,8 @@ * * Authors: * Mitsuru KANDA @USAGI - * Kazunori MIYAZAWA @USAGI - * Kunihiro Ishiguro <kunihiro@ipinfusion.com> + * Kazunori MIYAZAWA @USAGI + * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * YOSHIFUJI Hideaki @USAGI * IPv6 support */ @@ -52,7 +52,6 @@ int xfrm6_rcv(struct sk_buff *skb) return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff], 0); } - EXPORT_SYMBOL(xfrm6_rcv); int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, @@ -142,5 +141,4 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, drop: return -1; } - EXPORT_SYMBOL(xfrm6_input_addr); diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 433672d07d0b..ca3f29b98ae5 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -25,7 +25,6 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, { return ip6_find_1stfragopt(skb, prevhdr); } - EXPORT_SYMBOL(xfrm6_find_1stfragopt); static int xfrm6_local_dontfrag(struct sk_buff *skb) diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 2a0bbda2c76a..ac49f84fe2c3 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -3,11 +3,11 @@ * * Authors: * Mitsuru KANDA @USAGI - * Kazunori MIYAZAWA @USAGI - * Kunihiro Ishiguro <kunihiro@ipinfusion.com> - * IPv6 support - * YOSHIFUJI Hideaki - * Split up af-specific portion + * Kazunori MIYAZAWA @USAGI + * Kunihiro Ishiguro <kunihiro@ipinfusion.com> + * IPv6 support + * YOSHIFUJI Hideaki + * Split up af-specific portion * */ @@ -84,7 +84,7 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len) { if (dst->ops->family == AF_INET6) { - struct rt6_info *rt = (struct rt6_info*)dst; + struct rt6_info *rt = (struct rt6_info *)dst; if (rt->rt6i_node) path->path_cookie = rt->rt6i_node->fn_sernum; } @@ -97,7 +97,7 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, const struct flowi *fl) { - struct rt6_info *rt = (struct rt6_info*)xdst->route; + struct rt6_info *rt = (struct rt6_info *)xdst->route; xdst->u.dst.dev = dev; dev_hold(dev); @@ -296,7 +296,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { .family = AF_INET6, .dst_ops = &xfrm6_dst_ops, .dst_lookup = xfrm6_dst_lookup, - .get_saddr = xfrm6_get_saddr, + .get_saddr = xfrm6_get_saddr, .decode_session = _decode_session6, .get_tos = xfrm6_get_tos, .init_dst = xfrm6_init_dst, 
@@ -319,9 +319,9 @@ static void xfrm6_policy_fini(void) static struct ctl_table xfrm6_policy_table[] = { { .procname = "xfrm6_gc_thresh", - .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh, - .maxlen = sizeof(int), - .mode = 0644, + .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh, + .maxlen = sizeof(int), + .mode = 0644, .proc_handler = proc_dointvec, }, { } diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index 3fc970135fc6..8a1f9c0d2a13 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c @@ -3,11 +3,11 @@ * * Authors: * Mitsuru KANDA @USAGI - * Kazunori MIYAZAWA @USAGI - * Kunihiro Ishiguro <kunihiro@ipinfusion.com> - * IPv6 support - * YOSHIFUJI Hideaki @USAGI - * Split up af-specific portion + * Kazunori MIYAZAWA @USAGI + * Kunihiro Ishiguro <kunihiro@ipinfusion.com> + * IPv6 support + * YOSHIFUJI Hideaki @USAGI + * Split up af-specific portion * */ @@ -45,10 +45,10 @@ xfrm6_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl, const xfrm_address_t *daddr, const xfrm_address_t *saddr) { x->id = tmpl->id; - if (ipv6_addr_any((struct in6_addr*)&x->id.daddr)) + if (ipv6_addr_any((struct in6_addr *)&x->id.daddr)) memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr)); - if (ipv6_addr_any((struct in6_addr*)&x->props.saddr)) + if (ipv6_addr_any((struct in6_addr *)&x->props.saddr)) memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr)); x->props.mode = tmpl->mode; x->props.reqid = tmpl->reqid; diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 1c66465a42dd..5743044cd660 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -15,7 +15,7 @@ * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Authors Mitsuru KANDA <mk@linux-ipv6.org> - * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> + * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> * * Based on net/ipv4/xfrm4_tunnel.c * @@ -110,7 +110,6 @@ __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr) rcu_read_unlock_bh(); return htonl(spi); } - EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi) @@ -187,7 +186,6 @@ __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr) return htonl(spi); } - EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); static void x6spi_destroy_rcu(struct rcu_head *head) diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index da787930df0a..2a6a1fdd62c0 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -493,8 +493,8 @@ static void iucv_declare_cpu(void *data) err = "Paging or storage error"; break; } - pr_warning("Defining an interrupt buffer on CPU %i" - " failed with 0x%02x (%s)\n", cpu, rc, err); + pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n", + cpu, rc, err); return; } @@ -1831,7 +1831,7 @@ static void iucv_external_interrupt(struct ext_code ext_code, BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); if (!work) { - pr_warning("iucv_external_interrupt: out of memory\n"); + pr_warn("iucv_external_interrupt: out of memory\n"); return; } memcpy(&work->data, p, sizeof(work->data)); @@ -1974,8 +1974,7 @@ static int iucv_pm_restore(struct device *dev) printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); #endif if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table) - pr_warning("Suspending Linux did not completely close all IUCV " - "connections\n"); + pr_warn("Suspending Linux did not completely close all IUCV 
connections\n"); iucv_pm_state = IUCV_PM_RESTORING; if (cpumask_empty(&iucv_irq_cpumask)) { rc = iucv_query_maxconn(); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 1109d3bb8dac..895348e44c7d 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -148,7 +148,7 @@ do { \ atomic_read(&_t->ref_count)); \ l2tp_tunnel_inc_refcount_1(_t); \ } while (0) -#define l2tp_tunnel_dec_refcount(_t) +#define l2tp_tunnel_dec_refcount(_t) \ do { \ pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \ __func__, __LINE__, (_t)->name, \ @@ -1582,19 +1582,17 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ tunnel->encap = encap; if (encap == L2TP_ENCAPTYPE_UDP) { - /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ - udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; - udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; - udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) - udpv6_encap_enable(); - else -#endif - udp_encap_enable(); - } + struct udp_tunnel_sock_cfg udp_cfg; + + udp_cfg.sk_user_data = tunnel; + udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP; + udp_cfg.encap_rcv = l2tp_udp_encap_recv; + udp_cfg.encap_destroy = l2tp_udp_encap_destroy; - sk->sk_user_data = tunnel; + setup_udp_tunnel_sock(net, sock, &udp_cfg); + } else { + sk->sk_user_data = tunnel; + } /* Hook on the tunnel socket destructor so that we can cleanup * if the tunnel socket goes away. diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 927b4ea0128b..4d8989b87960 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1011,15 +1011,8 @@ static int sta_apply_parameters(struct ieee80211_local *local, clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE); } - if (mask & BIT(NL80211_STA_FLAG_WME)) { - if (set & BIT(NL80211_STA_FLAG_WME)) { - set_sta_flag(sta, WLAN_STA_WME); - sta->sta.wme = true; - } else { - clear_sta_flag(sta, WLAN_STA_WME); - sta->sta.wme = false; - } - } + if (mask & BIT(NL80211_STA_FLAG_WME)) + sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME); if (mask & BIT(NL80211_STA_FLAG_MFP)) { if (set & BIT(NL80211_STA_FLAG_MFP)) @@ -3352,7 +3345,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, band = chanctx_conf->def.chan->band; sta = sta_info_get_bss(sdata, peer); if (sta) { - qos = test_sta_flag(sta, WLAN_STA_WME); + qos = sta->sta.wme; } else { rcu_read_unlock(); return -ENOLINK; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 399ad82c997f..4c74e8da64b9 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -549,12 +549,12 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, compat = cfg80211_chandef_compatible( &sdata->vif.bss_conf.chandef, compat); - if (!compat) + if (WARN_ON_ONCE(!compat)) break; } rcu_read_unlock(); - if (WARN_ON_ONCE(!compat)) + if (!compat) return; ieee80211_change_chanctx(local, ctx, compat); @@ -639,41 +639,6 @@ out: return ret; } -static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_chanctx_conf *conf; - struct ieee80211_chanctx *ctx; - bool use_reserved_switch = false; - - lockdep_assert_held(&local->chanctx_mtx); - - conf = rcu_dereference_protected(sdata->vif.chanctx_conf, - lockdep_is_held(&local->chanctx_mtx)); - if (!conf) - return; - - ctx = container_of(conf, struct ieee80211_chanctx, conf); - - if 
(sdata->reserved_chanctx) { - if (sdata->reserved_chanctx->replace_state == - IEEE80211_CHANCTX_REPLACES_OTHER && - ieee80211_chanctx_num_reserved(local, - sdata->reserved_chanctx) > 1) - use_reserved_switch = true; - - ieee80211_vif_unreserve_chanctx(sdata); - } - - ieee80211_assign_vif_chanctx(sdata, NULL); - if (ieee80211_chanctx_refcount(local, ctx) == 0) - ieee80211_free_chanctx(local, ctx); - - /* Unreserving may ready an in-place reservation. */ - if (use_reserved_switch) - ieee80211_vif_use_reserved_switch(local); -} - void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx) { @@ -764,63 +729,6 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS); } -int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, - const struct cfg80211_chan_def *chandef, - enum ieee80211_chanctx_mode mode) -{ - struct ieee80211_local *local = sdata->local; - struct ieee80211_chanctx *ctx; - u8 radar_detect_width = 0; - int ret; - - lockdep_assert_held(&local->mtx); - - WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); - - mutex_lock(&local->chanctx_mtx); - - ret = cfg80211_chandef_dfs_required(local->hw.wiphy, - chandef, - sdata->wdev.iftype); - if (ret < 0) - goto out; - if (ret > 0) - radar_detect_width = BIT(chandef->width); - - sdata->radar_required = ret; - - ret = ieee80211_check_combinations(sdata, chandef, mode, - radar_detect_width); - if (ret < 0) - goto out; - - __ieee80211_vif_release_channel(sdata); - - ctx = ieee80211_find_chanctx(local, chandef, mode); - if (!ctx) - ctx = ieee80211_new_chanctx(local, chandef, mode); - if (IS_ERR(ctx)) { - ret = PTR_ERR(ctx); - goto out; - } - - sdata->vif.bss_conf.chandef = *chandef; - - ret = ieee80211_assign_vif_chanctx(sdata, ctx); - if (ret) { - /* if assign fails refcount stays the same */ - if (ieee80211_chanctx_refcount(local, ctx) == 0) - ieee80211_free_chanctx(local, ctx); - goto out; - } - - ieee80211_recalc_smps_chanctx(local, ctx); - ieee80211_recalc_radar_chanctx(local, ctx); - out: - mutex_unlock(&local->chanctx_mtx); - return ret; -} - static void __ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata, bool clear) @@ -1269,8 +1177,7 @@ err: return err; } -int -ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) +static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *sdata_tmp; struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx; @@ -1522,6 +1429,98 @@ err: return err; } +static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx_conf *conf; + struct ieee80211_chanctx *ctx; + bool use_reserved_switch = false; + + lockdep_assert_held(&local->chanctx_mtx); + + conf = rcu_dereference_protected(sdata->vif.chanctx_conf, + lockdep_is_held(&local->chanctx_mtx)); + if (!conf) + return; + + ctx = container_of(conf, struct ieee80211_chanctx, conf); + + if (sdata->reserved_chanctx) { + if (sdata->reserved_chanctx->replace_state == + IEEE80211_CHANCTX_REPLACES_OTHER && + ieee80211_chanctx_num_reserved(local, + sdata->reserved_chanctx) > 1) + use_reserved_switch = true; + + ieee80211_vif_unreserve_chanctx(sdata); + } + + ieee80211_assign_vif_chanctx(sdata, NULL); + if (ieee80211_chanctx_refcount(local, ctx) == 0) + ieee80211_free_chanctx(local, ctx); + + /* Unreserving may ready an in-place reservation. 
*/ + if (use_reserved_switch) + ieee80211_vif_use_reserved_switch(local); +} + +int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_chanctx *ctx; + u8 radar_detect_width = 0; + int ret; + + lockdep_assert_held(&local->mtx); + + WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev)); + + mutex_lock(&local->chanctx_mtx); + + ret = cfg80211_chandef_dfs_required(local->hw.wiphy, + chandef, + sdata->wdev.iftype); + if (ret < 0) + goto out; + if (ret > 0) + radar_detect_width = BIT(chandef->width); + + sdata->radar_required = ret; + + ret = ieee80211_check_combinations(sdata, chandef, mode, + radar_detect_width); + if (ret < 0) + goto out; + + __ieee80211_vif_release_channel(sdata); + + ctx = ieee80211_find_chanctx(local, chandef, mode); + if (!ctx) + ctx = ieee80211_new_chanctx(local, chandef, mode); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto out; + } + + sdata->vif.bss_conf.chandef = *chandef; + + ret = ieee80211_assign_vif_chanctx(sdata, ctx); + if (ret) { + /* if assign fails refcount stays the same */ + if (ieee80211_chanctx_refcount(local, ctx) == 0) + ieee80211_free_chanctx(local, ctx); + goto out; + } + + ieee80211_recalc_smps_chanctx(local, ctx); + ieee80211_recalc_radar_chanctx(local, ctx); + out: + mutex_unlock(&local->chanctx_mtx); + return ret; +} + int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 86173c0de40e..33eb4a43a2f3 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -77,7 +77,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, TEST(AUTH), TEST(ASSOC), TEST(PS_STA), TEST(PS_DRIVER), TEST(AUTHORIZED), TEST(SHORT_PREAMBLE), - TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), + sta->sta.wme ? 
"WME\n" : "", + TEST(WDS), TEST(CLEAR_PS_FILT), TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT), diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 9713dc54ea4b..5f9654d31a8d 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -1038,7 +1038,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, } if (sta && elems->wmm_info) - set_sta_flag(sta, WLAN_STA_WME); + sta->sta.wme = true; if (sta && elems->ht_operation && elems->ht_cap_elem && sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT && diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ef7a089ac546..ffb20e5e6cf3 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1869,7 +1869,6 @@ ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata, int __must_check ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata); int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata); -int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local); int __must_check ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/key.c b/net/mac80211/key.c index d808cff80153..6429d0e1d4a1 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -130,9 +130,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) if (!ret) { key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || - (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) + if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) sdata->crypto_tx_tailroom_needed_cnt--; WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && @@ -180,9 +178,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) sta = key->sta; sdata = key->sdata; - if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || - (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) + if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) increment_tailroom_need_count(sdata); ret = drv_set_key(key->local, DISABLE_KEY, sdata, @@ -878,9 +874,7 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || - (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) + if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) increment_tailroom_need_count(key->sdata); } diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index cf032a8db9d7..a6699dceae7c 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -729,7 +729,7 @@ void mesh_plink_broken(struct sta_info *sta) tbl = rcu_dereference(mesh_paths); for_each_mesh_entry(tbl, node, i) { mpath = node->mpath; - if (rcu_dereference(mpath->next_hop) == sta && + if (rcu_access_pointer(mpath->next_hop) == sta && mpath->flags & MESH_PATH_ACTIVE && !(mpath->flags & MESH_PATH_FIXED)) { spin_lock_bh(&mpath->state_lock); @@ -794,7 +794,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) tbl = resize_dereference_mesh_paths(); for_each_mesh_entry(tbl, node, i) { mpath = node->mpath; - if (rcu_dereference(mpath->next_hop) == sta) { + if (rcu_access_pointer(mpath->next_hop) == sta) { 
spin_lock(&tbl->hashwlock[i]); __mesh_path_del(tbl, node); spin_unlock(&tbl->hashwlock[i]); diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index c47194d27149..b488e1859b18 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -431,14 +431,12 @@ __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr) return NULL; sta->plink_state = NL80211_PLINK_LISTEN; + sta->sta.wme = true; sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED); - set_sta_flag(sta, WLAN_STA_WME); - sta->sta.wme = true; - return sta; } @@ -1004,7 +1002,6 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, enum ieee80211_self_protected_actioncode ftype; u32 changed = 0; u8 ie_len = elems->peering_len; - __le16 _plid, _llid; u16 plid, llid = 0; if (!elems->peering) { @@ -1039,13 +1036,10 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, /* Note the lines below are correct, the llid in the frame is the plid * from the point of view of this host. */ - memcpy(&_plid, PLINK_GET_LLID(elems->peering), sizeof(__le16)); - plid = le16_to_cpu(_plid); + plid = get_unaligned_le16(PLINK_GET_LLID(elems->peering)); if (ftype == WLAN_SP_MESH_PEERING_CONFIRM || - (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8)) { - memcpy(&_llid, PLINK_GET_PLID(elems->peering), sizeof(__le16)); - llid = le16_to_cpu(_llid); - } + (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8)) + llid = get_unaligned_le16(PLINK_GET_PLID(elems->peering)); /* WARNING: Only for sta pointer, is dropped & re-acquired */ rcu_read_lock(); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b82a12a9f0f1..8a73de6a5f5b 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -149,6 +149,7 @@ static u32 ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, + const struct ieee80211_ht_cap *ht_cap, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, struct cfg80211_chan_def *chandef, bool tracking) @@ -162,13 +163,19 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, chandef->center_freq1 = channel->center_freq; chandef->center_freq2 = 0; - if (!ht_oper || !sband->ht_cap.ht_supported) { + if (!ht_cap || !ht_oper || !sband->ht_cap.ht_supported) { ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; goto out; } chandef->width = NL80211_CHAN_WIDTH_20; + if (!(ht_cap->cap_info & + cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40))) { + ret = IEEE80211_STA_DISABLE_40MHZ | IEEE80211_STA_DISABLE_VHT; + goto out; + } + ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, channel->band); /* check that channel matches the right operating channel */ @@ -328,6 +335,7 @@ out: static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, + const struct ieee80211_ht_cap *ht_cap, const struct ieee80211_ht_operation *ht_oper, const struct ieee80211_vht_operation *vht_oper, const u8 *bssid, u32 *changed) @@ -367,8 +375,9 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, sband = local->hw.wiphy->bands[chan->band]; /* calculate new channel (type) based on HT/VHT operation IEs */ - flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, - vht_oper, &chandef, true); + flags = ieee80211_determine_chantype(sdata, sband, chan, + ht_cap, ht_oper, vht_oper, + &chandef, true); /* * Downgrade 
the new channel if we associated with restricted @@ -2677,8 +2686,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) set_sta_flag(sta, WLAN_STA_MFP); - if (elems.wmm_param) - set_sta_flag(sta, WLAN_STA_WME); + sta->sta.wme = elems.wmm_param; err = sta_info_move_state(sta, IEEE80211_STA_ASSOC); if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) @@ -3174,7 +3182,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, bssid); - if (ieee80211_config_bw(sdata, sta, elems.ht_operation, + if (ieee80211_config_bw(sdata, sta, + elems.ht_cap_elem, elems.ht_operation, elems.vht_operation, bssid, &changed)) { mutex_unlock(&local->sta_mtx); ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, @@ -3808,6 +3817,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + const struct ieee80211_ht_cap *ht_cap = NULL; const struct ieee80211_ht_operation *ht_oper = NULL; const struct ieee80211_vht_operation *vht_oper = NULL; struct ieee80211_supported_band *sband; @@ -3824,14 +3834,17 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && sband->ht_cap.ht_supported) { - const u8 *ht_oper_ie, *ht_cap; + const u8 *ht_oper_ie, *ht_cap_ie; ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION); if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper)) ht_oper = (void *)(ht_oper_ie + 2); - ht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY); - if (!ht_cap || ht_cap[1] < sizeof(struct ieee80211_ht_cap)) { + ht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY); + if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap)) + ht_cap = (void *)(ht_cap_ie + 2); + + if (!ht_cap) { ifmgd->flags |= IEEE80211_STA_DISABLE_HT; ht_oper = NULL; } @@ -3862,7 +3875,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, cbss->channel, - ht_oper, vht_oper, + ht_cap, ht_oper, vht_oper, &chandef, false); sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bd2c9b22c945..a8d862f9183c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2725,7 +2725,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) sig = status->signal; if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, - rx->skb->data, rx->skb->len, 0, GFP_ATOMIC)) { + rx->skb->data, rx->skb->len, 0)) { if (rx->sta) rx->sta->rx_packets++; dev_kfree_skb(rx->skb); diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index a0a938145dcc..a9bb6eb8c3e0 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -1094,7 +1094,7 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata) if (rcu_access_pointer(local->sched_scan_sdata)) { ret = drv_sched_scan_stop(local, sdata); if (!ret) - rcu_assign_pointer(local->sched_scan_sdata, NULL); + RCU_INIT_POINTER(local->sched_scan_sdata, NULL); } out: mutex_unlock(&local->mtx); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a1e433b88c66..4dd3badab259 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1182,7 +1182,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb; int size = sizeof(*nullfunc); __le16 
fc; - bool qos = test_sta_flag(sta, WLAN_STA_WME); + bool qos = sta->sta.wme; struct ieee80211_tx_info *info; struct ieee80211_chanctx_conf *chanctx_conf; @@ -1837,7 +1837,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE); - if (test_sta_flag(sta, WLAN_STA_WME)) + if (sta->sta.wme) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME); if (test_sta_flag(sta, WLAN_STA_MFP)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index d411bcc8ef08..89c40d5c0633 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -31,7 +31,6 @@ * when virtual port control is not in use. * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble * frames. - * @WLAN_STA_WME: Station is a QoS-STA. * @WLAN_STA_WDS: Station is one of our WDS peers. * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next @@ -69,7 +68,6 @@ enum ieee80211_sta_info_flags { WLAN_STA_PS_STA, WLAN_STA_AUTHORIZED, WLAN_STA_SHORT_PREAMBLE, - WLAN_STA_WME, WLAN_STA_WDS, WLAN_STA_CLEAR_PS_FILT, WLAN_STA_MFP, diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 1b21050be174..f2cb3b6c1871 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -316,8 +316,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, } /* add the QoS param IE if both the peer and we support it */ - if (local->hw.queues >= IEEE80211_NUM_ACS && - test_sta_flag(sta, WLAN_STA_WME)) + if (local->hw.queues >= IEEE80211_NUM_ACS && sta->sta.wme) ieee80211_tdls_add_wmm_param_ie(sdata, skb); /* add any custom IEs that go before HT operation */ diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 464106c023d8..cf7141452b0f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1478,7 +1478,10 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, tail_need = max_t(int, tail_need, 0); } - if (skb_cloned(skb)) + if (skb_cloned(skb) && + (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CLONED_SKBS) || + !skb_clone_writable(skb, ETH_HLEN) || + sdata->crypto_tx_tailroom_needed_cnt)) I802_DEBUG_INC(local->tx_expand_skb_head_cloned); else if (head_need || tail_need) I802_DEBUG_INC(local->tx_expand_skb_head); @@ -1844,7 +1847,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); hdrlen = 30; authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); - wme_sta = test_sta_flag(sta, WLAN_STA_WME); + wme_sta = sta->sta.wme; } ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); @@ -1957,7 +1960,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, if (sta) { authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); - wme_sta = test_sta_flag(sta, WLAN_STA_WME); + wme_sta = sta->sta.wme; tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER); tdls_auth = test_sta_flag(sta, @@ -2035,7 +2038,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, sta = sta_info_get(sdata, hdr.addr1); if (sta) { authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); - wme_sta = test_sta_flag(sta, WLAN_STA_WME); + wme_sta = sta->sta.wme; } } @@ -2069,30 +2072,23 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, if (unlikely(!multicast && skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) { - struct sk_buff 
*orig_skb = skb; + struct sk_buff *ack_skb = skb_clone_sk(skb); - skb = skb_clone(skb, GFP_ATOMIC); - if (skb) { + if (ack_skb) { unsigned long flags; int id; spin_lock_irqsave(&local->ack_status_lock, flags); - id = idr_alloc(&local->ack_status_frames, orig_skb, + id = idr_alloc(&local->ack_status_frames, ack_skb, 1, 0x10000, GFP_ATOMIC); spin_unlock_irqrestore(&local->ack_status_lock, flags); if (id >= 0) { info_id = id; info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - } else if (skb_shared(skb)) { - kfree_skb(orig_skb); } else { - kfree_skb(skb); - skb = orig_skb; + kfree_skb(ack_skb); } - } else { - /* couldn't clone -- lose tx status ... */ - skb = orig_skb; } } diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index d51422c778de..6459946f0b74 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -118,7 +118,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, case NL80211_IFTYPE_AP_VLAN: sta = rcu_dereference(sdata->u.vlan.sta); if (sta) { - qos = test_sta_flag(sta, WLAN_STA_WME); + qos = sta->sta.wme; break; } case NL80211_IFTYPE_AP: @@ -145,7 +145,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, if (!sta && ra && !is_multicast_ether_addr(ra)) { sta = sta_info_get(sdata, ra); if (sta) - qos = test_sta_flag(sta, WLAN_STA_WME); + qos = sta->sta.wme; } rcu_read_unlock(); diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index 7f820a108a9c..a14cf9ede171 100644 --- a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -86,9 +86,8 @@ fail: static void mac802154_rx_worker(struct work_struct *work) { struct rx_work *rw = container_of(work, struct rx_work, work); - struct sk_buff *skb = rw->skb; - mac802154_subif_rx(rw->dev, skb, rw->lqi); + mac802154_subif_rx(rw->dev, rw->skb, rw->lqi); kfree(rw); } @@ -101,7 +100,7 @@ ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi) if (!skb) return; - work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC); + work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) return; diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index 8124353646ae..fdf4c0e67259 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -89,8 +89,7 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, if (!(priv->phy->channels_supported[page] & (1 << chan))) { WARN_ON(1); - kfree_skb(skb); - return NETDEV_TX_OK; + goto err_tx; } mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb); @@ -103,12 +102,10 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, data[1] = crc >> 8; } - if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) { - kfree_skb(skb); - return NETDEV_TX_OK; - } + if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) + goto err_tx; - work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC); + work = kzalloc(sizeof(*work), GFP_ATOMIC); if (!work) { kfree_skb(skb); return NETDEV_TX_BUSY; @@ -129,4 +126,8 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb, queue_work(priv->dev_workqueue, &work->work); return NETDEV_TX_OK; + +err_tx: + kfree_skb(skb); + return NETDEV_TX_OK; } diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c index 547838822d5e..b7961129ce4d 100644 --- a/net/mac802154/wpan.c +++ b/net/mac802154/wpan.c @@ -475,8 +475,7 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb, rc = mac802154_llsec_decrypt(&sdata->sec, skb); if (rc) { pr_debug("decryption failed: %i\n", rc); - kfree_skb(skb); - return NET_RX_DROP; + goto fail; } sdata->dev->stats.rx_packets++; @@ -488,9 +487,12 @@ 
mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb, default: pr_warn("ieee802154: bad frame received (type = %d)\n", mac_cb(skb)->type); - kfree_skb(skb); - return NET_RX_DROP; + goto fail; } + +fail: + kfree_skb(skb); + return NET_RX_DROP; } static void mac802154_print_addr(const char *name, diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index b5c1d3aadb41..608d18986923 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -496,6 +496,15 @@ config NFT_LIMIT This option adds the "limit" expression that you can use to ratelimit rule matchings. +config NFT_MASQ + depends on NF_TABLES + depends on NF_CONNTRACK + depends on NF_NAT + tristate "Netfilter nf_tables masquerade support" + help + This option adds the "masquerade" expression that you can use + to perform NAT in the masquerade flavour. + config NFT_NAT depends on NF_TABLES depends on NF_CONNTRACK diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index fad5fdba34e5..a9571be3f791 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -87,6 +87,7 @@ obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o obj-$(CONFIG_NFT_HASH) += nft_hash.o obj-$(CONFIG_NFT_COUNTER) += nft_counter.o obj-$(CONFIG_NFT_LOG) += nft_log.o +obj-$(CONFIG_NFT_MASQ) += nft_masq.o # generic X tables obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index 6f1f9f494808..dafdb39ef042 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -112,7 +112,7 @@ bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb, { struct bitmap_ip *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_ip_adt_elem e = { }; + struct bitmap_ip_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); u32 ip; @@ -132,7 +132,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], struct bitmap_ip *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; u32 ip = 0, ip_to = 0; - struct bitmap_ip_adt_elem e = { }; + struct bitmap_ip_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); int ret = 0; diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 740eabededd9..dbad505e79e3 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -203,7 +203,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb, { struct bitmap_ipmac *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_ipmac_adt_elem e = {}; + struct bitmap_ipmac_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); u32 ip; @@ -232,7 +232,7 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[], { const struct bitmap_ipmac *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_ipmac_adt_elem e = {}; + struct bitmap_ipmac_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0; int ret = 0; diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index cf99676e69f8..a4b65ae1986c 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c @@ -104,7 +104,7 @@ bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb, { struct bitmap_port *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_port_adt_elem e = {}; + struct 
bitmap_port_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); __be16 __port; u16 port = 0; @@ -129,7 +129,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[], { struct bitmap_port *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct bitmap_port_adt_elem e = {}; + struct bitmap_port_adt_elem e = { .id = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port; /* wraparound */ u16 port_to; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index ec8114fae50b..5593e97426c4 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -101,7 +101,7 @@ load_settype(const char *name) nfnl_unlock(NFNL_SUBSYS_IPSET); pr_debug("try to load ip_set_%s\n", name); if (request_module("ip_set_%s", name) < 0) { - pr_warning("Can't find ip_set type %s\n", name); + pr_warn("Can't find ip_set type %s\n", name); nfnl_lock(NFNL_SUBSYS_IPSET); return false; } @@ -195,20 +195,19 @@ ip_set_type_register(struct ip_set_type *type) int ret = 0; if (type->protocol != IPSET_PROTOCOL) { - pr_warning("ip_set type %s, family %s, revision %u:%u uses " - "wrong protocol version %u (want %u)\n", - type->name, family_name(type->family), - type->revision_min, type->revision_max, - type->protocol, IPSET_PROTOCOL); + pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n", + type->name, family_name(type->family), + type->revision_min, type->revision_max, + type->protocol, IPSET_PROTOCOL); return -EINVAL; } ip_set_type_lock(); if (find_set_type(type->name, type->family, type->revision_min)) { /* Duplicate! */ - pr_warning("ip_set type %s, family %s with revision min %u " - "already registered!\n", type->name, - family_name(type->family), type->revision_min); + pr_warn("ip_set type %s, family %s with revision min %u already registered!\n", + type->name, family_name(type->family), + type->revision_min); ret = -EINVAL; goto unlock; } @@ -228,9 +227,9 @@ ip_set_type_unregister(struct ip_set_type *type) { ip_set_type_lock(); if (!find_set_type(type->name, type->family, type->revision_min)) { - pr_warning("ip_set type %s, family %s with revision min %u " - "not registered\n", type->name, - family_name(type->family), type->revision_min); + pr_warn("ip_set type %s, family %s with revision min %u not registered\n", + type->name, family_name(type->family), + type->revision_min); goto unlock; } list_del_rcu(&type->list); diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index 61c7fb052802..8a38890cbe5e 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -565,8 +565,8 @@ retry: set->name, orig->htable_bits, htable_bits, orig); if (!htable_bits) { /* In case we have plenty of memory :-) */ - pr_warning("Cannot increase the hashsize of set %s further\n", - set->name); + pr_warn("Cannot increase the hashsize of set %s further\n", + set->name); return -IPSET_ERR_HASH_FULL; } t = ip_set_alloc(sizeof(*t) @@ -651,8 +651,8 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, if (h->elements >= h->maxelem) { if (net_ratelimit()) - pr_warning("Set %s is full, maxelem %u reached\n", - set->name, h->maxelem); + pr_warn("Set %s is full, maxelem %u reached\n", + set->name, h->maxelem); return -IPSET_ERR_HASH_FULL; } @@ -998,8 +998,8 @@ mtype_list(const struct ip_set *set, nla_put_failure: nlmsg_trim(skb, incomplete); if (unlikely(first == cb->args[IPSET_CB_ARG0])) { - 
pr_warning("Can't list set %s: one bucket does not fit into " - "a message. Please report it!\n", set->name); + pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n", + set->name); cb->args[IPSET_CB_ARG0] = 0; return -EMSGSIZE; } @@ -1093,7 +1093,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, if (tb[IPSET_ATTR_MARKMASK]) { markmask = ntohl(nla_get_u32(tb[IPSET_ATTR_MARKMASK])); - if ((markmask > 4294967295u) || markmask == 0) + if (markmask == 0) return -IPSET_ERR_INVALID_MARKMASK; } #endif diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index dd40607f878e..e52739938533 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -84,7 +84,7 @@ hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb, { const struct hash_ip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ip4_elem e = {}; + struct hash_ip4_elem e = { 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); __be32 ip; @@ -103,7 +103,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ip4_elem e = {}; + struct hash_ip4_elem e = { 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, hosts; int ret = 0; @@ -222,7 +222,7 @@ hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb, { const struct hash_ip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ip6_elem e = {}; + struct hash_ip6_elem e = { { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); @@ -239,7 +239,7 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ip6_elem e = {}; + struct hash_ip6_elem e = { { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); int ret; diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index 7597b82a8b03..f37a5ae8a5e0 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c @@ -94,7 +94,7 @@ hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipport4_elem e = { }; + struct hash_ipport4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, @@ -111,7 +111,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ipport *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipport4_elem e = { }; + struct hash_ipport4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip, ip_to = 0, p = 0, port, port_to; bool with_ports = false; @@ -258,7 +258,7 @@ hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipport6_elem e = { }; + struct hash_ipport6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, @@ -275,7 +275,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ipport *h = set->data; ipset_adtfn adtfn = 
set->variant->adt[adt]; - struct hash_ipport6_elem e = { }; + struct hash_ipport6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to; bool with_ports = false; diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index 672655ffd573..41ef00eda874 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c @@ -95,7 +95,7 @@ hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipportip4_elem e = { }; + struct hash_ipportip4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, @@ -113,7 +113,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ipportip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipportip4_elem e = { }; + struct hash_ipportip4_elem e = { .ip = 0 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip, ip_to = 0, p = 0, port, port_to; bool with_ports = false; @@ -265,7 +265,7 @@ hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipportip6_elem e = { }; + struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, @@ -283,7 +283,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[], { const struct hash_ipportip *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; - struct hash_ipportip6_elem e = { }; + struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to; bool with_ports = false; diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index 3e99987e4bf2..96b131366e7b 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c @@ -203,7 +203,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], flags |= (IPSET_FLAG_NOMATCH << 16); } - if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] && + if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO])) { e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0])); e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1])); @@ -219,9 +219,10 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], return ret; if (ip_to < ip) swap(ip, ip_to); - if (ip + UINT_MAX == ip_to) + if (unlikely(ip + UINT_MAX == ip_to)) return -IPSET_ERR_HASH_RANGE; - } + } else + ip_set_mask_from_to(ip, ip_to, e.cidr[0]); ip2_to = ip2_from; if (tb[IPSET_ATTR_IP2_TO]) { @@ -230,10 +231,10 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], return ret; if (ip2_to < ip2_from) swap(ip2_from, ip2_to); - if (ip2_from + UINT_MAX == ip2_to) + if (unlikely(ip2_from + UINT_MAX == ip2_to)) return -IPSET_ERR_HASH_RANGE; - - } + } else + ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]); if (retried) ip = ntohl(h->next.ip[0]); diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index c0d2ba73f8b2..2f0034347189 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c @@ -257,7 +257,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct 
nlattr *tb[], swap(ip, ip_to); if (unlikely(ip + UINT_MAX == ip_to)) return -IPSET_ERR_HASH_RANGE; - } + } else + ip_set_mask_from_to(ip, ip_to, e.cidr[0]); port_to = port = ntohs(e.port); if (tb[IPSET_ATTR_PORT_TO]) { @@ -275,7 +276,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], swap(ip2_from, ip2_to); if (unlikely(ip2_from + UINT_MAX == ip2_to)) return -IPSET_ERR_HASH_RANGE; - } + } else + ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]); if (retried) ip = ntohl(h->next.ip[0]); diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 3e2317f3cf68..f87adbad6076 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -597,7 +597,9 @@ init_list_set(struct net *net, struct ip_set *set, u32 size) struct set_elem *e; u32 i; - map = kzalloc(sizeof(*map) + size * set->dsize, GFP_KERNEL); + map = kzalloc(sizeof(*map) + + min_t(u32, size, IP_SET_LIST_MAX_SIZE) * set->dsize, + GFP_KERNEL); if (!map) return false; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index fd3f444a4f96..bd2b208ba56c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2179,29 +2179,41 @@ static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u) return 0; } +#define CMDID(cmd) (cmd - IP_VS_BASE_CTL) -#define SET_CMDID(cmd) (cmd - IP_VS_BASE_CTL) -#define SERVICE_ARG_LEN (sizeof(struct ip_vs_service_user)) -#define SVCDEST_ARG_LEN (sizeof(struct ip_vs_service_user) + \ - sizeof(struct ip_vs_dest_user)) -#define TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user)) -#define DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user)) -#define MAX_ARG_LEN SVCDEST_ARG_LEN - -static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = { - [SET_CMDID(IP_VS_SO_SET_ADD)] = SERVICE_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_EDIT)] = SERVICE_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_DEL)] = SERVICE_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_FLUSH)] = 0, - [SET_CMDID(IP_VS_SO_SET_ADDDEST)] = SVCDEST_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_DELDEST)] = SVCDEST_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_EDITDEST)] = SVCDEST_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_TIMEOUT)] = TIMEOUT_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_STARTDAEMON)] = DAEMON_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_STOPDAEMON)] = DAEMON_ARG_LEN, - [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN, +struct ip_vs_svcdest_user { + struct ip_vs_service_user s; + struct ip_vs_dest_user d; }; +static const unsigned char set_arglen[CMDID(IP_VS_SO_SET_MAX) + 1] = { + [CMDID(IP_VS_SO_SET_ADD)] = sizeof(struct ip_vs_service_user), + [CMDID(IP_VS_SO_SET_EDIT)] = sizeof(struct ip_vs_service_user), + [CMDID(IP_VS_SO_SET_DEL)] = sizeof(struct ip_vs_service_user), + [CMDID(IP_VS_SO_SET_ADDDEST)] = sizeof(struct ip_vs_svcdest_user), + [CMDID(IP_VS_SO_SET_DELDEST)] = sizeof(struct ip_vs_svcdest_user), + [CMDID(IP_VS_SO_SET_EDITDEST)] = sizeof(struct ip_vs_svcdest_user), + [CMDID(IP_VS_SO_SET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user), + [CMDID(IP_VS_SO_SET_STARTDAEMON)] = sizeof(struct ip_vs_daemon_user), + [CMDID(IP_VS_SO_SET_STOPDAEMON)] = sizeof(struct ip_vs_daemon_user), + [CMDID(IP_VS_SO_SET_ZERO)] = sizeof(struct ip_vs_service_user), +}; + +union ip_vs_set_arglen { + struct ip_vs_service_user field_IP_VS_SO_SET_ADD; + struct ip_vs_service_user field_IP_VS_SO_SET_EDIT; + struct ip_vs_service_user field_IP_VS_SO_SET_DEL; + struct ip_vs_svcdest_user field_IP_VS_SO_SET_ADDDEST; + struct ip_vs_svcdest_user field_IP_VS_SO_SET_DELDEST; + struct 
ip_vs_svcdest_user field_IP_VS_SO_SET_EDITDEST; + struct ip_vs_timeout_user field_IP_VS_SO_SET_TIMEOUT; + struct ip_vs_daemon_user field_IP_VS_SO_SET_STARTDAEMON; + struct ip_vs_daemon_user field_IP_VS_SO_SET_STOPDAEMON; + struct ip_vs_service_user field_IP_VS_SO_SET_ZERO; +}; + +#define MAX_SET_ARGLEN sizeof(union ip_vs_set_arglen) + static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc, struct ip_vs_service_user *usvc_compat) { @@ -2239,7 +2251,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { struct net *net = sock_net(sk); int ret; - unsigned char arg[MAX_ARG_LEN]; + unsigned char arg[MAX_SET_ARGLEN]; struct ip_vs_service_user *usvc_compat; struct ip_vs_service_user_kern usvc; struct ip_vs_service *svc; @@ -2247,16 +2259,15 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) struct ip_vs_dest_user_kern udest; struct netns_ipvs *ipvs = net_ipvs(net); + BUILD_BUG_ON(sizeof(arg) > 255); if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX) return -EINVAL; - if (len < 0 || len > MAX_ARG_LEN) - return -EINVAL; - if (len != set_arglen[SET_CMDID(cmd)]) { - pr_err("set_ctl: len %u != %u\n", - len, set_arglen[SET_CMDID(cmd)]); + if (len != set_arglen[CMDID(cmd)]) { + IP_VS_DBG(1, "set_ctl: len %u != %u\n", + len, set_arglen[CMDID(cmd)]); return -EINVAL; } @@ -2512,51 +2523,51 @@ __ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u) #endif } +static const unsigned char get_arglen[CMDID(IP_VS_SO_GET_MAX) + 1] = { + [CMDID(IP_VS_SO_GET_VERSION)] = 64, + [CMDID(IP_VS_SO_GET_INFO)] = sizeof(struct ip_vs_getinfo), + [CMDID(IP_VS_SO_GET_SERVICES)] = sizeof(struct ip_vs_get_services), + [CMDID(IP_VS_SO_GET_SERVICE)] = sizeof(struct ip_vs_service_entry), + [CMDID(IP_VS_SO_GET_DESTS)] = sizeof(struct ip_vs_get_dests), + [CMDID(IP_VS_SO_GET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user), + [CMDID(IP_VS_SO_GET_DAEMON)] = 2 * sizeof(struct ip_vs_daemon_user), +}; -#define GET_CMDID(cmd) (cmd - IP_VS_BASE_CTL) -#define GET_INFO_ARG_LEN (sizeof(struct ip_vs_getinfo)) -#define GET_SERVICES_ARG_LEN (sizeof(struct ip_vs_get_services)) -#define GET_SERVICE_ARG_LEN (sizeof(struct ip_vs_service_entry)) -#define GET_DESTS_ARG_LEN (sizeof(struct ip_vs_get_dests)) -#define GET_TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user)) -#define GET_DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user) * 2) - -static const unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = { - [GET_CMDID(IP_VS_SO_GET_VERSION)] = 64, - [GET_CMDID(IP_VS_SO_GET_INFO)] = GET_INFO_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_SERVICES)] = GET_SERVICES_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_SERVICE)] = GET_SERVICE_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_DESTS)] = GET_DESTS_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_TIMEOUT)] = GET_TIMEOUT_ARG_LEN, - [GET_CMDID(IP_VS_SO_GET_DAEMON)] = GET_DAEMON_ARG_LEN, +union ip_vs_get_arglen { + char field_IP_VS_SO_GET_VERSION[64]; + struct ip_vs_getinfo field_IP_VS_SO_GET_INFO; + struct ip_vs_get_services field_IP_VS_SO_GET_SERVICES; + struct ip_vs_service_entry field_IP_VS_SO_GET_SERVICE; + struct ip_vs_get_dests field_IP_VS_SO_GET_DESTS; + struct ip_vs_timeout_user field_IP_VS_SO_GET_TIMEOUT; + struct ip_vs_daemon_user field_IP_VS_SO_GET_DAEMON[2]; }; +#define MAX_GET_ARGLEN sizeof(union ip_vs_get_arglen) + static int do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { - unsigned char arg[128]; + unsigned char arg[MAX_GET_ARGLEN]; int ret = 0; 
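/* sizeof() applied to a union yields the size of its largest member, so
 * MAX_GET_ARGLEN (like MAX_SET_ARGLEN above) tracks the biggest request
 * structure at compile time and cannot drift out of sync with a
 * hand-maintained length macro; the BUILD_BUG_ON() below additionally
 * keeps every get_arglen[]/set_arglen[] entry representable in an
 * unsigned char.
 */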
unsigned int copylen; struct net *net = sock_net(sk); struct netns_ipvs *ipvs = net_ipvs(net); BUG_ON(!net); + BUILD_BUG_ON(sizeof(arg) > 255); if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX) return -EINVAL; - if (*len < get_arglen[GET_CMDID(cmd)]) { - pr_err("get_ctl: len %u < %u\n", - *len, get_arglen[GET_CMDID(cmd)]); + copylen = get_arglen[CMDID(cmd)]; + if (*len < (int) copylen) { + IP_VS_DBG(1, "get_ctl: len %d < %u\n", *len, copylen); return -EINVAL; } - copylen = get_arglen[GET_CMDID(cmd)]; - if (copylen > 128) - return -EINVAL; - if (copy_from_user(arg, user, copylen) != 0) return -EFAULT; /* diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index de88c4ab5146..5016a6929085 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -142,7 +142,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone) static u32 __hash_bucket(u32 hash, unsigned int size) { - return ((u64)hash * size) >> 32; + return reciprocal_scale(hash, size); } static u32 hash_bucket(u32 hash, const struct net *net) @@ -358,7 +358,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report) tstamp = nf_conn_tstamp_find(ct); if (tstamp && tstamp->stop == 0) - tstamp->stop = ktime_to_ns(ktime_get_real()); + tstamp->stop = ktime_get_real_ns(); if (nf_ct_is_dying(ct)) goto delete; diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index f87e8f68ad45..91a1837acd0e 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -83,7 +83,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all), (((tuple->dst.protonum ^ tuple->src.l3num) << 16) | (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd); - return ((u64)hash * nf_ct_expect_hsize) >> 32; + + return reciprocal_scale(hash, nf_ct_expect_hsize); } struct nf_conntrack_expect * diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 355a5c4ef763..1bd9ed9e62f6 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1737,7 +1737,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone, } tstamp = nf_conn_tstamp_find(ct); if (tstamp) - tstamp->start = ktime_to_ns(ktime_get_real()); + tstamp->start = ktime_get_real_ns(); err = nf_conntrack_hash_check_insert(ct); if (err < 0) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index f641751dba9d..cf65a1e040dd 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -101,7 +101,7 @@ static void *ct_seq_start(struct seq_file *seq, loff_t *pos) { struct ct_iter_state *st = seq->private; - st->time_now = ktime_to_ns(ktime_get_real()); + st->time_now = ktime_get_real_ns(); rcu_read_lock(); return ct_get_idx(seq, *pos); } diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 552f97cd9fde..4e0b47831d43 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -126,7 +126,8 @@ hash_by_src(const struct net *net, u16 zone, /* Original src, to ensure we map it consistently if poss. 
*/ hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32), tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd); - return ((u64)hash * net->ct.nat_htable_size) >> 32; + + return reciprocal_scale(hash, net->ct.nat_htable_size); } /* Is this tuple already taken? (not by us) */ @@ -274,7 +275,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple, } var_ipp->all[i] = (__force __u32) - htonl(minip + (((u64)j * dist) >> 32)); + htonl(minip + reciprocal_scale(j, dist)); if (var_ipp->all[i] != range->max_addr.all[i]) full_range = true; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index deeb95fb7028..82374601577e 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -127,6 +127,204 @@ static void nft_trans_destroy(struct nft_trans *trans) kfree(trans); } +static void nf_tables_unregister_hooks(const struct nft_table *table, + const struct nft_chain *chain, + unsigned int hook_nops) +{ + if (!(table->flags & NFT_TABLE_F_DORMANT) && + chain->flags & NFT_BASE_CHAIN) + nf_unregister_hooks(nft_base_chain(chain)->ops, hook_nops); +} + +/* Internal table flags */ +#define NFT_TABLE_INACTIVE (1 << 15) + +static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) +{ + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table)); + if (trans == NULL) + return -ENOMEM; + + if (msg_type == NFT_MSG_NEWTABLE) + ctx->table->flags |= NFT_TABLE_INACTIVE; + + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + return 0; +} + +static int nft_deltable(struct nft_ctx *ctx) +{ + int err; + + err = nft_trans_table_add(ctx, NFT_MSG_DELTABLE); + if (err < 0) + return err; + + list_del_rcu(&ctx->table->list); + return err; +} + +static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) +{ + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain)); + if (trans == NULL) + return -ENOMEM; + + if (msg_type == NFT_MSG_NEWCHAIN) + ctx->chain->flags |= NFT_CHAIN_INACTIVE; + + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + return 0; +} + +static int nft_delchain(struct nft_ctx *ctx) +{ + int err; + + err = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN); + if (err < 0) + return err; + + ctx->table->use--; + list_del_rcu(&ctx->chain->list); + + return err; +} + +static inline bool +nft_rule_is_active(struct net *net, const struct nft_rule *rule) +{ + return (rule->genmask & (1 << net->nft.gencursor)) == 0; +} + +static inline int gencursor_next(struct net *net) +{ + return net->nft.gencursor+1 == 1 ? 
1 : 0; +} + +static inline int +nft_rule_is_active_next(struct net *net, const struct nft_rule *rule) +{ + return (rule->genmask & (1 << gencursor_next(net))) == 0; +} + +static inline void +nft_rule_activate_next(struct net *net, struct nft_rule *rule) +{ + /* Now inactive, will be active in the future */ + rule->genmask = (1 << net->nft.gencursor); +} + +static inline void +nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) +{ + rule->genmask = (1 << gencursor_next(net)); +} + +static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) +{ + rule->genmask = 0; +} + +static int +nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) +{ + /* You cannot delete the same rule twice */ + if (nft_rule_is_active_next(ctx->net, rule)) { + nft_rule_deactivate_next(ctx->net, rule); + ctx->chain->use--; + return 0; + } + return -ENOENT; +} + +static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type, + struct nft_rule *rule) +{ + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule)); + if (trans == NULL) + return NULL; + + nft_trans_rule(trans) = rule; + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + + return trans; +} + +static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) +{ + struct nft_trans *trans; + int err; + + trans = nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule); + if (trans == NULL) + return -ENOMEM; + + err = nf_tables_delrule_deactivate(ctx, rule); + if (err < 0) { + nft_trans_destroy(trans); + return err; + } + + return 0; +} + +static int nft_delrule_by_chain(struct nft_ctx *ctx) +{ + struct nft_rule *rule; + int err; + + list_for_each_entry(rule, &ctx->chain->rules, list) { + err = nft_delrule(ctx, rule); + if (err < 0) + return err; + } + return 0; +} + +/* Internal set flag */ +#define NFT_SET_INACTIVE (1 << 15) + +static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, + struct nft_set *set) +{ + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set)); + if (trans == NULL) + return -ENOMEM; + + if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) { + nft_trans_set_id(trans) = + ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); + set->flags |= NFT_SET_INACTIVE; + } + nft_trans_set(trans) = set; + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + + return 0; +} + +static int nft_delset(struct nft_ctx *ctx, struct nft_set *set) +{ + int err; + + err = nft_trans_set_add(ctx, NFT_MSG_DELSET, set); + if (err < 0) + return err; + + list_del_rcu(&set->list); + ctx->table->use--; + + return err; +} + /* * Tables */ @@ -309,9 +507,6 @@ done: return skb->len; } -/* Internal table flags */ -#define NFT_TABLE_INACTIVE (1 << 15) - static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -443,21 +638,6 @@ err: return ret; } -static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) -{ - struct nft_trans *trans; - - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table)); - if (trans == NULL) - return -ENOMEM; - - if (msg_type == NFT_MSG_NEWTABLE) - ctx->table->flags |= NFT_TABLE_INACTIVE; - - list_add_tail(&trans->list, &ctx->net->nft.commit_list); - return 0; -} - static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -527,6 +707,67 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, return 0; } 
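The rule genmask helpers hoisted above implement nf_tables' two-generation commit protocol: each rule carries a two-bit genmask in which a set bit means "inactive in that generation", a newly added rule is visible only in the next generation, a deleted rule is hidden from the next generation, and committing flips the per-namespace gencursor and clears the masks. A minimal standalone sketch of that bit trick (illustrative names only, not the kernel API):

#include <stdio.h>

/* Toy model of the two-generation rule genmask; not kernel code. */
struct rule { unsigned int genmask; };

static int gencursor;				/* flips 0 <-> 1 on commit */
static int gencursor_next(void) { return !gencursor; }

static int rule_is_active(const struct rule *r)
{
	return !(r->genmask & (1 << gencursor));
}

static int rule_is_active_next(const struct rule *r)
{
	return !(r->genmask & (1 << gencursor_next()));
}

int main(void)
{
	/* A newly added rule: inactive now, active after commit. */
	struct rule r = { .genmask = 1 << gencursor };

	printf("pre-commit:  active=%d next=%d\n",
	       rule_is_active(&r), rule_is_active_next(&r));

	gencursor = gencursor_next();	/* commit flips the generation */
	r.genmask = 0;			/* nft_rule_clear(): active in both */

	printf("post-commit: active=%d\n", rule_is_active(&r));
	return 0;
}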
+static int nft_flush_table(struct nft_ctx *ctx) +{ + int err; + struct nft_chain *chain, *nc; + struct nft_set *set, *ns; + + list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { + ctx->chain = chain; + + err = nft_delrule_by_chain(ctx); + if (err < 0) + goto out; + + err = nft_delchain(ctx); + if (err < 0) + goto out; + } + + list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { + if (set->flags & NFT_SET_ANONYMOUS && + !list_empty(&set->bindings)) + continue; + + err = nft_delset(ctx, set); + if (err < 0) + goto out; + } + + err = nft_deltable(ctx); +out: + return err; +} + +static int nft_flush(struct nft_ctx *ctx, int family) +{ + struct nft_af_info *afi; + struct nft_table *table, *nt; + const struct nlattr * const *nla = ctx->nla; + int err = 0; + + list_for_each_entry(afi, &ctx->net->nft.af_info, list) { + if (family != AF_UNSPEC && afi->family != family) + continue; + + ctx->afi = afi; + list_for_each_entry_safe(table, nt, &afi->tables, list) { + if (nla[NFTA_TABLE_NAME] && + nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0) + continue; + + ctx->table = table; + + err = nft_flush_table(ctx); + if (err < 0) + goto out; + } + } +out: + return err; +} + static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -535,9 +776,13 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, struct nft_af_info *afi; struct nft_table *table; struct net *net = sock_net(skb->sk); - int family = nfmsg->nfgen_family, err; + int family = nfmsg->nfgen_family; struct nft_ctx ctx; + nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla); + if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL) + return nft_flush(&ctx, family); + afi = nf_tables_afinfo_lookup(net, family, false); if (IS_ERR(afi)) return PTR_ERR(afi); @@ -547,16 +792,11 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, return PTR_ERR(table); if (table->flags & NFT_TABLE_INACTIVE) return -ENOENT; - if (table->use > 0) - return -EBUSY; - nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); - err = nft_trans_table_add(&ctx, NFT_MSG_DELTABLE); - if (err < 0) - return err; + ctx.afi = afi; + ctx.table = table; - list_del_rcu(&table->list); - return 0; + return nft_flush_table(&ctx); } static void nf_tables_table_destroy(struct nft_ctx *ctx) @@ -913,21 +1153,6 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain, rcu_assign_pointer(chain->stats, newstats); } -static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) -{ - struct nft_trans *trans; - - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain)); - if (trans == NULL) - return -ENOMEM; - - if (msg_type == NFT_MSG_NEWCHAIN) - ctx->chain->flags |= NFT_CHAIN_INACTIVE; - - list_add_tail(&trans->list, &ctx->net->nft.commit_list); - return 0; -} - static void nf_tables_chain_destroy(struct nft_chain *chain) { BUG_ON(chain->use > 0); @@ -1157,11 +1382,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, list_add_tail_rcu(&chain->list, &table->chains); return 0; err2: - if (!(table->flags & NFT_TABLE_F_DORMANT) && - chain->flags & NFT_BASE_CHAIN) { - nf_unregister_hooks(nft_base_chain(chain)->ops, - afi->nops); - } + nf_tables_unregister_hooks(table, chain, afi->nops); err1: nf_tables_chain_destroy(chain); return err; @@ -1178,7 +1399,6 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, struct net *net = sock_net(skb->sk); int family = nfmsg->nfgen_family; struct nft_ctx ctx; - 
int err; afi = nf_tables_afinfo_lookup(net, family, false); if (IS_ERR(afi)) @@ -1199,13 +1419,8 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, return -EBUSY; nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); - err = nft_trans_chain_add(&ctx, NFT_MSG_DELCHAIN); - if (err < 0) - return err; - table->use--; - list_del_rcu(&chain->list); - return 0; + return nft_delchain(&ctx); } /* @@ -1527,41 +1742,6 @@ err: return err; } -static inline bool -nft_rule_is_active(struct net *net, const struct nft_rule *rule) -{ - return (rule->genmask & (1 << net->nft.gencursor)) == 0; -} - -static inline int gencursor_next(struct net *net) -{ - return net->nft.gencursor+1 == 1 ? 1 : 0; -} - -static inline int -nft_rule_is_active_next(struct net *net, const struct nft_rule *rule) -{ - return (rule->genmask & (1 << gencursor_next(net))) == 0; -} - -static inline void -nft_rule_activate_next(struct net *net, struct nft_rule *rule) -{ - /* Now inactive, will be active in the future */ - rule->genmask = (1 << net->nft.gencursor); -} - -static inline void -nft_rule_disactivate_next(struct net *net, struct nft_rule *rule) -{ - rule->genmask = (1 << gencursor_next(net)); -} - -static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) -{ - rule->genmask = 0; -} - static int nf_tables_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) { @@ -1687,21 +1867,6 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, kfree(rule); } -static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type, - struct nft_rule *rule) -{ - struct nft_trans *trans; - - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule)); - if (trans == NULL) - return NULL; - - nft_trans_rule(trans) = rule; - list_add_tail(&trans->list, &ctx->net->nft.commit_list); - - return trans; -} - #define NFT_RULE_MAXEXPRS 128 static struct nft_expr_info *info; @@ -1823,7 +1988,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, err = -ENOMEM; goto err2; } - nft_rule_disactivate_next(net, old_rule); + nft_rule_deactivate_next(net, old_rule); chain->use--; list_add_tail_rcu(&rule->list, &old_rule->list); } else { @@ -1867,33 +2032,6 @@ err1: return err; } -static int -nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule) -{ - /* You cannot delete the same rule twice */ - if (nft_rule_is_active_next(ctx->net, rule)) { - if (nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule) == NULL) - return -ENOMEM; - nft_rule_disactivate_next(ctx->net, rule); - ctx->chain->use--; - return 0; - } - return -ENOENT; -} - -static int nf_table_delrule_by_chain(struct nft_ctx *ctx) -{ - struct nft_rule *rule; - int err; - - list_for_each_entry(rule, &ctx->chain->rules, list) { - err = nf_tables_delrule_one(ctx, rule); - if (err < 0) - return err; - } - return 0; -} - static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -1932,14 +2070,14 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, if (IS_ERR(rule)) return PTR_ERR(rule); - err = nf_tables_delrule_one(&ctx, rule); + err = nft_delrule(&ctx, rule); } else { - err = nf_table_delrule_by_chain(&ctx); + err = nft_delrule_by_chain(&ctx); } } else { list_for_each_entry(chain, &table->chains, list) { ctx.chain = chain; - err = nf_table_delrule_by_chain(&ctx); + err = nft_delrule_by_chain(&ctx); if (err < 0) break; } @@ -2322,8 +2460,6 @@ static int nf_tables_dump_sets_done(struct netlink_callback *cb) return 0; } 
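Every delete path above now funnels through the nft_del*() helpers, which only queue an NFT_MSG_DEL* transaction on the per-namespace commit list and unlink the object from its RCU-protected list; nothing is freed until nf_tables_commit() walks the batch, and nf_tables_abort() can relink the object instead. A toy model of that queue-then-commit flow (hypothetical names, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>

/* Toy transaction log in the spirit of net->nft.commit_list. */
struct trans {
	char msg[16];		/* e.g. "DELRULE" */
	const char *obj;
	struct trans *next;
};

static struct trans *commit_list, **commit_tail = &commit_list;

static int queue_trans(const char *msg, const char *obj)
{
	struct trans *t = calloc(1, sizeof(*t));

	if (!t)
		return -1;
	snprintf(t->msg, sizeof(t->msg), "%s", msg);
	t->obj = obj;
	*commit_tail = t;	/* list_add_tail() equivalent */
	commit_tail = &t->next;
	return 0;
}

static void commit(void)
{
	while (commit_list) {
		struct trans *t = commit_list;

		commit_list = t->next;
		printf("commit: %s %s\n", t->msg, t->obj);
		free(t);	/* deletions become final only here */
	}
	commit_tail = &commit_list;
}

int main(void)
{
	queue_trans("DELRULE", "rule-a");	/* nft_delrule() analogue */
	queue_trans("DELCHAIN", "chain-x");	/* nft_delchain() analogue */
	commit();				/* nf_tables_commit() analogue */
	return 0;
}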
-#define NFT_SET_INACTIVE (1 << 15) /* Internal set flag */ - static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -2398,26 +2534,6 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx, return 0; } -static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, - struct nft_set *set) -{ - struct nft_trans *trans; - - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set)); - if (trans == NULL) - return -ENOMEM; - - if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) { - nft_trans_set_id(trans) = - ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); - set->flags |= NFT_SET_INACTIVE; - } - nft_trans_set(trans) = set; - list_add_tail(&trans->list, &ctx->net->nft.commit_list); - - return 0; -} - static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) @@ -2611,13 +2727,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, if (!list_empty(&set->bindings)) return -EBUSY; - err = nft_trans_set_add(&ctx, NFT_MSG_DELSET, set); - if (err < 0) - return err; - - list_del_rcu(&set->list); - ctx.table->use--; - return 0; + return nft_delset(&ctx, set); } static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx, @@ -3352,11 +3462,9 @@ static int nf_tables_commit(struct sk_buff *skb) break; case NFT_MSG_DELCHAIN: nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN); - if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) && - trans->ctx.chain->flags & NFT_BASE_CHAIN) { - nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops, - trans->ctx.afi->nops); - } + nf_tables_unregister_hooks(trans->ctx.table, + trans->ctx.chain, + trans->ctx.afi->nops); break; case NFT_MSG_NEWRULE: nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); @@ -3479,11 +3587,9 @@ static int nf_tables_abort(struct sk_buff *skb) } else { trans->ctx.table->use--; list_del_rcu(&trans->ctx.chain->list); - if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) && - trans->ctx.chain->flags & NFT_BASE_CHAIN) { - nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops, - trans->ctx.afi->nops); - } + nf_tables_unregister_hooks(trans->ctx.table, + trans->ctx.chain, + trans->ctx.afi->nops); } break; case NFT_MSG_DELCHAIN: diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 3ea0eacbd970..c18af2f63eef 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -40,6 +40,11 @@ struct nf_acct { char data[0]; }; +struct nfacct_filter { + u32 value; + u32 mask; +}; + #define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES) #define NFACCT_OVERQUOTA_BIT 2 /* NFACCT_F_OVERQUOTA */ @@ -181,6 +186,7 @@ static int nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct nf_acct *cur, *last; + const struct nfacct_filter *filter = cb->data; if (cb->args[2]) return 0; @@ -197,6 +203,10 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb) last = NULL; } + + if (filter && (cur->flags & filter->mask) != filter->value) + continue; + if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFNL_MSG_TYPE(cb->nlh->nlmsg_type), @@ -211,6 +221,38 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } +static int nfnl_acct_done(struct netlink_callback *cb) +{ + kfree(cb->data); + return 0; +} + +static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = { + [NFACCT_FILTER_MASK] = { .type = NLA_U32 }, 
+ [NFACCT_FILTER_VALUE] = { .type = NLA_U32 }, +}; + +static struct nfacct_filter * +nfacct_filter_alloc(const struct nlattr * const attr) +{ + struct nfacct_filter *filter; + struct nlattr *tb[NFACCT_FILTER_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy); + if (err < 0) + return ERR_PTR(err); + + filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL); + if (!filter) + return ERR_PTR(-ENOMEM); + + filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK])); + filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE])); + + return filter; +} + static int nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const tb[]) @@ -222,7 +264,18 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb, if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = nfnl_acct_dump, + .done = nfnl_acct_done, }; + + if (tb[NFACCT_FILTER]) { + struct nfacct_filter *filter; + + filter = nfacct_filter_alloc(tb[NFACCT_FILTER]); + if (IS_ERR(filter)) + return PTR_ERR(filter); + + c.data = filter; + } return netlink_dump_start(nfnl, skb, nlh, &c); } @@ -314,6 +367,7 @@ static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = { [NFACCT_PKTS] = { .type = NLA_U64 }, [NFACCT_FLAGS] = { .type = NLA_U32 }, [NFACCT_QUOTA] = { .type = NLA_U64 }, + [NFACCT_FILTER] = {.type = NLA_NESTED }, }; static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = { diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c new file mode 100644 index 000000000000..6637bab00567 --- /dev/null +++ b/net/netfilter/nft_masq.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netlink.h> +#include <linux/netfilter.h> +#include <linux/netfilter/nf_tables.h> +#include <net/netfilter/nf_tables.h> +#include <net/netfilter/nf_nat.h> +#include <net/netfilter/nft_masq.h> + +const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = { + [NFTA_MASQ_FLAGS] = { .type = NLA_U32 }, +}; +EXPORT_SYMBOL_GPL(nft_masq_policy); + +int nft_masq_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_masq *priv = nft_expr_priv(expr); + + if (tb[NFTA_MASQ_FLAGS] == NULL) + return 0; + + priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS])); + if (priv->flags & ~NF_NAT_RANGE_MASK) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(nft_masq_init); + +int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr) +{ + const struct nft_masq *priv = nft_expr_priv(expr); + + if (priv->flags == 0) + return 0; + + if (nla_put_be32(skb, NFTA_MASQ_FLAGS, htonl(priv->flags))) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -1; +} +EXPORT_SYMBOL_GPL(nft_masq_dump); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 852b178c6ae7..1e7c076ca63a 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -14,6 +14,10 @@ #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/smp.h> #include <net/dst.h> #include <net/sock.h> #include <net/tcp_states.h> /* for TCP_TIME_WAIT */ @@ -124,6 +128,43 @@ void nft_meta_get_eval(const struct nft_expr *expr, dest->data[0] = skb->secmark; break; #endif + case NFT_META_PKTTYPE: + if (skb->pkt_type != PACKET_LOOPBACK) { + dest->data[0] = skb->pkt_type; + break; + } + + switch (pkt->ops->pf) { + case NFPROTO_IPV4: + if (ipv4_is_multicast(ip_hdr(skb)->daddr)) + dest->data[0] = PACKET_MULTICAST; + else + dest->data[0] = PACKET_BROADCAST; + break; + case NFPROTO_IPV6: + if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF) + dest->data[0] = PACKET_MULTICAST; + else + dest->data[0] = PACKET_BROADCAST; + break; + default: + WARN_ON(1); + goto err; + } + break; + case NFT_META_CPU: + dest->data[0] = smp_processor_id(); + break; + case NFT_META_IIFGROUP: + if (in == NULL) + goto err; + dest->data[0] = in->group; + break; + case NFT_META_OIFGROUP: + if (out == NULL) + goto err; + dest->data[0] = out->group; + break; default: WARN_ON(1); goto err; @@ -195,6 +236,10 @@ int nft_meta_get_init(const struct nft_ctx *ctx, #ifdef CONFIG_NETWORK_SECMARK case NFT_META_SECMARK: #endif + case NFT_META_PKTTYPE: + case NFT_META_CPU: + case NFT_META_IIFGROUP: + case NFT_META_OIFGROUP: break; default: return -EOPNOTSUPP; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 79ff58cd36dc..799550b476fb 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -33,6 +33,7 @@ struct nft_nat { enum nft_registers sreg_proto_max:8; enum nf_nat_manip_type type:8; u8 family; + u16 flags; }; static void nft_nat_eval(const struct nft_expr *expr, @@ -71,6 +72,8 @@ static void nft_nat_eval(const struct nft_expr *expr, range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } + range.flags |= priv->flags; + data[NFT_REG_VERDICT].verdict = nf_nat_setup_info(ct, &range, priv->type); } @@ -82,6 +85,7 @@ static const struct nla_policy 
nft_nat_policy[NFTA_NAT_MAX + 1] = { [NFTA_NAT_REG_ADDR_MAX] = { .type = NLA_U32 }, [NFTA_NAT_REG_PROTO_MIN] = { .type = NLA_U32 }, [NFTA_NAT_REG_PROTO_MAX] = { .type = NLA_U32 }, + [NFTA_NAT_FLAGS] = { .type = NLA_U32 }, }; static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, @@ -149,6 +153,12 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, } else priv->sreg_proto_max = priv->sreg_proto_min; + if (tb[NFTA_NAT_FLAGS]) { + priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); + if (priv->flags & ~NF_NAT_RANGE_MASK) + return -EINVAL; + } + return 0; } @@ -183,6 +193,12 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr) htonl(priv->sreg_proto_max))) goto nla_put_failure; } + + if (priv->flags != 0) { + if (nla_put_be32(skb, NFTA_NAT_FLAGS, htonl(priv->flags))) + goto nla_put_failure; + } + return 0; nla_put_failure: diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c index 73b73f687c58..02afaf48a729 100644 --- a/net/netfilter/xt_HMARK.c +++ b/net/netfilter/xt_HMARK.c @@ -126,7 +126,7 @@ hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd); hash = hash ^ (t->proto & info->proto_mask); - return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; + return reciprocal_scale(hash, info->hmodulus) + info->hoffset; } static void diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c index f4af1bfafb1c..96fa26b20b67 100644 --- a/net/netfilter/xt_cluster.c +++ b/net/netfilter/xt_cluster.c @@ -55,7 +55,8 @@ xt_cluster_hash(const struct nf_conn *ct, WARN_ON(1); break; } - return (((u64)hash * info->total_nodes) >> 32); + + return reciprocal_scale(hash, info->total_nodes); } static inline bool diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c index 1e634615ab9d..d4bec261e74e 100644 --- a/net/netfilter/xt_connbytes.c +++ b/net/netfilter/xt_connbytes.c @@ -120,7 +120,7 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par) * accounting is enabled, so complain in the hope that someone notices. 
*/ if (!nf_ct_acct_enabled(par->net)) { - pr_warning("Forcing CT accounting to be enabled\n"); + pr_warn("Forcing CT accounting to be enabled\n"); nf_ct_set_acct(par->net, true); } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 47dc6836830a..05fbc2a0be46 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -135,7 +135,7 @@ hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst) * give results between [0 and cfg.size-1] and same hash distribution, * but using a multiply, less expensive than a divide */ - return ((u64)hash * ht->cfg.size) >> 32; + return reciprocal_scale(hash, ht->cfg.size); } static struct dsthash_ent * @@ -943,7 +943,7 @@ static int __init hashlimit_mt_init(void) sizeof(struct dsthash_ent), 0, 0, NULL); if (!hashlimit_cachep) { - pr_warning("unable to create slab cache\n"); + pr_warn("unable to create slab cache\n"); goto err2; } return 0; diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 80c2e2d603e0..cb70f6ec5695 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -84,13 +84,12 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par) index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find set identified by id %u to match\n", - info->match_set.index); + pr_warn("Cannot find set identified by id %u to match\n", + info->match_set.index); return -ENOENT; } if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) { - pr_warning("Protocol error: set match dimension " - "is over the limit!\n"); + pr_warn("Protocol error: set match dimension is over the limit!\n"); ip_set_nfnl_put(par->net, info->match_set.index); return -ERANGE; } @@ -134,13 +133,12 @@ set_match_v1_checkentry(const struct xt_mtchk_param *par) index = ip_set_nfnl_get_byindex(par->net, info->match_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find set identified by id %u to match\n", - info->match_set.index); + pr_warn("Cannot find set identified by id %u to match\n", + info->match_set.index); return -ENOENT; } if (info->match_set.dim > IPSET_DIM_MAX) { - pr_warning("Protocol error: set match dimension " - "is over the limit!\n"); + pr_warn("Protocol error: set match dimension is over the limit!\n"); ip_set_nfnl_put(par->net, info->match_set.index); return -ERANGE; } @@ -230,8 +228,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) if (info->add_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find add_set index %u as target\n", - info->add_set.index); + pr_warn("Cannot find add_set index %u as target\n", + info->add_set.index); return -ENOENT; } } @@ -239,8 +237,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) if (info->del_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find del_set index %u as target\n", - info->del_set.index); + pr_warn("Cannot find del_set index %u as target\n", + info->del_set.index); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(par->net, info->add_set.index); return -ENOENT; @@ -248,8 +246,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par) } if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 || info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) { - pr_warning("Protocol error: SET target dimension " - "is over the limit!\n"); + pr_warn("Protocol 
error: SET target dimension is over the limit!\n"); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(par->net, info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) @@ -303,8 +300,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) if (info->add_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(par->net, info->add_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find add_set index %u as target\n", - info->add_set.index); + pr_warn("Cannot find add_set index %u as target\n", + info->add_set.index); return -ENOENT; } } @@ -312,8 +309,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) if (info->del_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(par->net, info->del_set.index); if (index == IPSET_INVALID_ID) { - pr_warning("Cannot find del_set index %u as target\n", - info->del_set.index); + pr_warn("Cannot find del_set index %u as target\n", + info->del_set.index); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(par->net, info->add_set.index); return -ENOENT; @@ -321,8 +318,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par) } if (info->add_set.dim > IPSET_DIM_MAX || info->del_set.dim > IPSET_DIM_MAX) { - pr_warning("Protocol error: SET target dimension " - "is over the limit!\n"); + pr_warn("Protocol error: SET target dimension is over the limit!\n"); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(par->net, info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c index d3c48b14ab94..5699adb97652 100644 --- a/net/netfilter/xt_string.c +++ b/net/netfilter/xt_string.c @@ -29,7 +29,6 @@ string_mt(const struct sk_buff *skb, struct xt_action_param *par) struct ts_state state; bool invert; - memset(&state, 0, sizeof(struct ts_state)); invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT; return (skb_find_text((struct sk_buff *)skb, conf->from_offset, diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 5231652a95d9..6932a42e41a2 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -35,11 +35,78 @@ #include <net/sctp/checksum.h> #include "datapath.h" +#include "flow.h" #include "vport.h" static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, const struct nlattr *attr, int len); +struct deferred_action { + struct sk_buff *skb; + const struct nlattr *actions; + + /* Store pkt_key clone when creating deferred action. */ + struct sw_flow_key pkt_key; +}; + +#define DEFERRED_ACTION_FIFO_SIZE 10 +struct action_fifo { + int head; + int tail; + /* Deferred action fifo queue storage. 
*/ + struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE]; +}; + +static struct action_fifo __percpu *action_fifos; +static DEFINE_PER_CPU(int, exec_actions_level); + +static void action_fifo_init(struct action_fifo *fifo) +{ + fifo->head = 0; + fifo->tail = 0; +} + +static bool action_fifo_is_empty(struct action_fifo *fifo) +{ + return (fifo->head == fifo->tail); +} + +static struct deferred_action *action_fifo_get(struct action_fifo *fifo) +{ + if (action_fifo_is_empty(fifo)) + return NULL; + + return &fifo->fifo[fifo->tail++]; +} + +static struct deferred_action *action_fifo_put(struct action_fifo *fifo) +{ + if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1) + return NULL; + + return &fifo->fifo[fifo->head++]; +} + +/* Return true if fifo is not full */ +static struct deferred_action *add_deferred_actions(struct sk_buff *skb, + struct sw_flow_key *key, + const struct nlattr *attr) +{ + struct action_fifo *fifo; + struct deferred_action *da; + + fifo = this_cpu_ptr(action_fifos); + da = action_fifo_put(fifo); + if (da) { + da->skb = skb; + da->actions = attr; + da->pkt_key = *key; + } + + return da; +} + static int make_writable(struct sk_buff *skb, int write_len) { if (!pskb_may_pull(skb, write_len)) @@ -410,16 +477,14 @@ static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port) } static int output_userspace(struct datapath *dp, struct sk_buff *skb, - const struct nlattr *attr) + struct sw_flow_key *key, const struct nlattr *attr) { struct dp_upcall_info upcall; const struct nlattr *a; int rem; - BUG_ON(!OVS_CB(skb)->pkt_key); - upcall.cmd = OVS_PACKET_CMD_ACTION; - upcall.key = OVS_CB(skb)->pkt_key; + upcall.key = key; upcall.userdata = NULL; upcall.portid = 0; @@ -445,11 +510,10 @@ static bool last_action(const struct nlattr *a, int rem) } static int sample(struct datapath *dp, struct sk_buff *skb, - const struct nlattr *attr) + struct sw_flow_key *key, const struct nlattr *attr) { const struct nlattr *acts_list = NULL; const struct nlattr *a; - struct sk_buff *sample_skb; int rem; for (a = nla_data(attr), rem = nla_len(attr); rem > 0; @@ -469,31 +533,47 @@ static int sample(struct datapath *dp, struct sk_buff *skb, rem = nla_len(acts_list); a = nla_data(acts_list); - /* Actions list is either empty or only contains a single user-space - * action, the latter being a special case as it is the only known - * usage of the sample action. - * In these special cases don't clone the skb as there are no - * side-effects in the nested actions. - * Otherwise, clone in case the nested actions have side effects. + /* Actions list is empty, do nothing */ + if (unlikely(!rem)) + return 0; + + /* The only known usage of sample action is having a single user-space + * action. Treat this usage as a special case. + * The output_userspace() should clone the skb to be sent to the + * user space. This skb will be consumed by its caller. */ - if (likely(rem == 0 || (nla_type(a) == OVS_ACTION_ATTR_USERSPACE && - last_action(a, rem)))) { - sample_skb = skb; - skb_get(skb); - } else { - sample_skb = skb_clone(skb, GFP_ATOMIC); - if (!sample_skb) /* Skip sample action when out of memory. */ - return 0; + if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && + last_action(a, rem))) + return output_userspace(dp, skb, key, a); + + skb = skb_clone(skb, GFP_ATOMIC); + if (!skb) + /* Skip the sample action when out of memory. 
*/ + return 0; + + if (!add_deferred_actions(skb, key, a)) { + if (net_ratelimit()) + pr_warn("%s: deferred actions limit reached, dropping sample action\n", + ovs_dp_name(dp)); + + kfree_skb(skb); } + return 0; +} - /* Note that do_execute_actions() never consumes skb. - * In the case where skb has been cloned above it is the clone that - * is consumed. Otherwise the skb_get(skb) call prevents - * consumption by do_execute_actions(). Thus, it is safe to simply - * return the error code and let the caller (also - * do_execute_actions()) free skb on error. - */ - return do_execute_actions(dp, sample_skb, a, rem); +static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, + const struct nlattr *attr) +{ + struct ovs_action_hash *hash_act = nla_data(attr); + u32 hash = 0; + + /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */ + hash = skb_get_hash(skb); + hash = jhash_1word(hash, hash_act->hash_basis); + if (!hash) + hash = 0x1; + + key->ovs_flow_hash = hash; } static int execute_set_action(struct sk_buff *skb, @@ -511,7 +591,7 @@ static int execute_set_action(struct sk_buff *skb, break; case OVS_KEY_ATTR_IPV4_TUNNEL: - OVS_CB(skb)->tun_key = nla_data(nested_attr); + OVS_CB(skb)->egress_tun_key = nla_data(nested_attr); break; case OVS_KEY_ATTR_ETHERNET: @@ -542,8 +622,47 @@ static int execute_set_action(struct sk_buff *skb, return err; } +static int execute_recirc(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, + const struct nlattr *a, int rem) +{ + struct deferred_action *da; + int err; + + err = ovs_flow_key_update(skb, key); + if (err) + return err; + + if (!last_action(a, rem)) { + /* Recirc action is not the last action + * of the action list, need to clone the skb. + */ + skb = skb_clone(skb, GFP_ATOMIC); + + /* Skip the recirc action when out of memory, but + * continue on with the rest of the action list. + */ + if (!skb) + return 0; + } + + da = add_deferred_actions(skb, key, NULL); + if (da) { + da->pkt_key.recirc_id = nla_get_u32(a); + } else { + kfree_skb(skb); + + if (net_ratelimit()) + pr_warn("%s: deferred action limit reached, dropping recirc action\n", + ovs_dp_name(dp)); + } + + return 0; +} + /* Execute a list of actions against 'skb'. */ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key, const struct nlattr *attr, int len) { /* Every output action needs a separate clone of 'skb', but the common @@ -569,7 +688,11 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_USERSPACE: - output_userspace(dp, skb, a); + output_userspace(dp, skb, key, a); + break; + + case OVS_ACTION_ATTR_HASH: + execute_hash(skb, key, a); break; case OVS_ACTION_ATTR_PUSH_VLAN: @@ -582,12 +705,23 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, err = pop_vlan(skb); break; + case OVS_ACTION_ATTR_RECIRC: + err = execute_recirc(dp, skb, key, a, rem); + if (last_action(a, rem)) { + /* If this is the last action, the skb has + * been consumed or freed. + * Return immediately. + */ + return err; + } + break; + case OVS_ACTION_ATTR_SET: err = execute_set_action(skb, nla_data(a)); break; case OVS_ACTION_ATTR_SAMPLE: - err = sample(dp, skb, a); + err = sample(dp, skb, key, a); if (unlikely(err)) /* skb already freed. */ return err; break;
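The per-CPU action_fifo added above turns nested action execution (sample and recirculation used to re-enter do_execute_actions()) into an iterative drain bounded by DEFERRED_ACTION_FIFO_SIZE, so stack depth no longer grows with action nesting. A minimal user-space sketch of the same head/tail discipline, with an int payload standing in for struct deferred_action; the names only mirror the patch:

#include <stdio.h>

#define DEFERRED_ACTION_FIFO_SIZE 10

struct action_fifo {
        int head;                               /* next free slot for put() */
        int tail;                               /* next slot for get() */
        int items[DEFERRED_ACTION_FIFO_SIZE];
};

static void action_fifo_init(struct action_fifo *fifo)
{
        fifo->head = 0;
        fifo->tail = 0;
}

static int action_fifo_put(struct action_fifo *fifo, int item)
{
        /* Same overflow rule as the patch: refuse once head hits the cap. */
        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
                return -1;
        fifo->items[fifo->head++] = item;
        return 0;
}

static int action_fifo_get(struct action_fifo *fifo, int *item)
{
        if (fifo->head == fifo->tail)
                return -1;                      /* empty */
        *item = fifo->items[fifo->tail++];
        return 0;
}

int main(void)
{
        struct action_fifo fifo;
        int v;

        action_fifo_init(&fifo);
        /* The top-level pass defers nested work instead of recursing... */
        action_fifo_put(&fifo, 1);
        action_fifo_put(&fifo, 2);
        /* ...and drains it iteratively once the outermost level is done. */
        while (action_fifo_get(&fifo, &v) == 0)
                printf("deferred item %d\n", v);
        action_fifo_init(&fifo);                /* reset for the next packet */
        return 0;
}

Note that head and tail are plain cursors, not a circular ring: the FIFO is reset after every top-level packet, which is why the simple head >= SIZE - 1 test is the entire overflow story.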
@@ -607,11 +741,63 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, return 0; } +static void process_deferred_actions(struct datapath *dp) +{ + struct action_fifo *fifo = this_cpu_ptr(action_fifos); + + /* Do not touch the FIFO if there are no deferred actions. */ + if (action_fifo_is_empty(fifo)) + return; + + /* Finish executing all deferred actions. */ + do { + struct deferred_action *da = action_fifo_get(fifo); + struct sk_buff *skb = da->skb; + struct sw_flow_key *key = &da->pkt_key; + const struct nlattr *actions = da->actions; + + if (actions) + do_execute_actions(dp, skb, key, actions, + nla_len(actions)); + else + ovs_dp_process_packet(skb, key); + } while (!action_fifo_is_empty(fifo)); + + /* Reset FIFO for the next packet. */ + action_fifo_init(fifo); +} + /* Execute a list of actions against 'skb'. */ -int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb) +int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *key) { - struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); + int level = this_cpu_read(exec_actions_level); + struct sw_flow_actions *acts; + int err; + + acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); + + this_cpu_inc(exec_actions_level); + err = do_execute_actions(dp, skb, key, + acts->actions, acts->actions_len); - OVS_CB(skb)->tun_key = NULL; - return do_execute_actions(dp, skb, acts->actions, acts->actions_len); + if (!level) + process_deferred_actions(dp); + + this_cpu_dec(exec_actions_level); + return err; +} + +int action_fifos_init(void) +{ + action_fifos = alloc_percpu(struct action_fifo); + if (!action_fifos) + return -ENOMEM; + + return 0; +} + +void action_fifos_exit(void) +{ + free_percpu(action_fifos); } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 64dc864a417f..9e3a2fae6a8f 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -157,7 +157,7 @@ static struct datapath *get_dp(struct net *net, int dp_ifindex) } /* Must be called with rcu_read_lock or ovs_mutex. */ -static const char *ovs_dp_name(const struct datapath *dp) +const char *ovs_dp_name(const struct datapath *dp) { struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL); return vport->ops->get_name(vport); @@ -238,32 +238,25 @@ void ovs_dp_detach_port(struct vport *p) } /* Must be called with rcu_read_lock. */ -void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) +void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) { + const struct vport *p = OVS_CB(skb)->input_vport; struct datapath *dp = p->dp; struct sw_flow *flow; struct dp_stats_percpu *stats; - struct sw_flow_key key; u64 *stats_counter; u32 n_mask_hit; - int error; stats = this_cpu_ptr(dp->stats_percpu); - /* Extract flow from 'skb' into 'key'. */ - error = ovs_flow_extract(skb, p->port_no, &key); - if (unlikely(error)) { - kfree_skb(skb); - return; - } - /* Look up flow. 
*/ - flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit); + flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit); if (unlikely(!flow)) { struct dp_upcall_info upcall; + int error; upcall.cmd = OVS_PACKET_CMD_MISS; - upcall.key = &key; + upcall.key = key; upcall.userdata = NULL; upcall.portid = ovs_vport_find_upcall_portid(p, skb); error = ovs_dp_upcall(dp, skb, &upcall); @@ -276,10 +269,9 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) } OVS_CB(skb)->flow = flow; - OVS_CB(skb)->pkt_key = &key; - ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb); - ovs_execute_actions(dp, skb); + ovs_flow_stats_update(OVS_CB(skb)->flow, key->tp.flags, skb); + ovs_execute_actions(dp, skb, key); stats_counter = &stats->n_hit; out: @@ -516,6 +508,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) struct sw_flow *flow; struct datapath *dp; struct ethhdr *eth; + struct vport *input_vport; int len; int err; @@ -550,13 +543,11 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(flow)) goto err_kfree_skb; - err = ovs_flow_extract(packet, -1, &flow->key); + err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet, + &flow->key); if (err) goto err_flow_free; - err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]); - if (err) - goto err_flow_free; acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); err = PTR_ERR(acts); if (IS_ERR(acts)) @@ -569,7 +560,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) goto err_flow_free; OVS_CB(packet)->flow = flow; - OVS_CB(packet)->pkt_key = &flow->key; packet->priority = flow->key.phy.priority; packet->mark = flow->key.phy.skb_mark; @@ -579,8 +569,17 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) if (!dp) goto err_unlock; + input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port); + if (!input_vport) + input_vport = ovs_vport_rcu(dp, OVSP_LOCAL); + + if (!input_vport) + goto err_unlock; + + OVS_CB(packet)->input_vport = input_vport; + local_bh_disable(); - err = ovs_execute_actions(dp, packet); + err = ovs_execute_actions(dp, packet, &flow->key); local_bh_enable(); rcu_read_unlock(); @@ -2067,10 +2066,14 @@ static int __init dp_init(void) pr_info("Open vSwitch switching datapath\n"); - err = ovs_internal_dev_rtnl_link_register(); + err = action_fifos_init(); if (err) goto error; + err = ovs_internal_dev_rtnl_link_register(); + if (err) + goto error_action_fifos_exit; + err = ovs_flow_init(); if (err) goto error_unreg_rtnl_link; @@ -2103,6 +2106,8 @@ error_flow_exit: ovs_flow_exit(); error_unreg_rtnl_link: ovs_internal_dev_rtnl_link_unregister(); +error_action_fifos_exit: + action_fifos_exit(); error: return err; } @@ -2116,6 +2121,7 @@ static void dp_cleanup(void) ovs_vport_exit(); ovs_flow_exit(); ovs_internal_dev_rtnl_link_unregister(); + action_fifos_exit(); } module_init(dp_init); diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 701b5738c38a..ac3f3df96961 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2012 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -95,14 +95,15 @@ struct datapath { /** * struct ovs_skb_cb - OVS data in skb CB * @flow: The flow associated with this packet. May be %NULL if no flow. 
- * @pkt_key: The flow information extracted from the packet. Must be nonnull. - * @tun_key: Key for the tunnel that encapsulated this packet. NULL if the - * packet is not being tunneled. + * @egress_tun_key: Tunnel information about this packet on egress path. + * NULL if the packet is not being tunneled. + * @input_vport: The original vport packet came in on. This value is cached + * when a packet is received by OVS. */ struct ovs_skb_cb { struct sw_flow *flow; - struct sw_flow_key *pkt_key; - struct ovs_key_ipv4_tunnel *tun_key; + struct vport *input_vport; + struct ovs_key_ipv4_tunnel *egress_tun_key; }; #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) @@ -183,17 +184,23 @@ static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_n extern struct notifier_block ovs_dp_device_notifier; extern struct genl_family dp_vport_genl_family; -void ovs_dp_process_received_packet(struct vport *, struct sk_buff *); +void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key); void ovs_dp_detach_port(struct vport *); int ovs_dp_upcall(struct datapath *, struct sk_buff *, const struct dp_upcall_info *); +const char *ovs_dp_name(const struct datapath *dp); struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq, u8 cmd); -int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb); +int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, + struct sw_flow_key *); + void ovs_dp_notify_wq(struct work_struct *work); +int action_fifos_init(void); +void action_fifos_exit(void); + #define OVS_NLERR(fmt, ...) \ do { \ if (net_ratelimit()) \ diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index d07ab538fc9d..4010423f2831 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -16,8 +16,6 @@ * 02110-1301, USA */ -#include "flow.h" -#include "datapath.h" #include <linux/uaccess.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -46,6 +44,10 @@ #include <net/ipv6.h> #include <net/ndisc.h> +#include "datapath.h" +#include "flow.h" +#include "flow_netlink.h" + u64 ovs_flow_used_time(unsigned long flow_jiffies) { struct timespec cur_ts; @@ -89,7 +91,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, * allocated stats as we have already locked them. */ if (likely(flow->stats_last_writer != NUMA_NO_NODE) - && likely(!rcu_dereference(flow->stats[node]))) { + && likely(!rcu_access_pointer(flow->stats[node]))) { /* Try to allocate node-specific stats. */ struct flow_stats *new_stats; @@ -420,10 +422,9 @@ invalid: } /** - * ovs_flow_extract - extracts a flow key from an Ethernet frame. + * key_extract - extracts a flow key from an Ethernet frame. * @skb: sk_buff that contains the frame, with skb->data pointing to the * Ethernet header - * @in_port: port number on which @skb was received. * @key: output flow key * * The caller must ensure that skb->len >= ETH_HLEN. @@ -442,19 +443,11 @@ invalid: * of a correct length, otherwise the same as skb->network_header. * For other key->eth.type values it is left untouched. 
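The shrunken ovs_skb_cb works because OVS, like other kernel layers, overlays its per-packet scratch state on the 48-byte sk_buff control buffer. A hedged user-space model of that overlay pattern; the struct and field names below are stand-ins, not the kernel's:

#include <assert.h>

struct sk_buff_stub {
        char cb[48];            /* per-layer scratch area, as in struct sk_buff */
};

struct ovs_cb_stub {
        void *flow;             /* looked-up flow, may be NULL */
        void *input_vport;      /* cached ingress port */
        void *egress_tun_key;   /* NULL unless the packet is being tunneled */
};

#define OVS_CB_STUB(skb) ((struct ovs_cb_stub *)(skb)->cb)

int main(void)
{
        struct sk_buff_stub skb = { { 0 } };

        /* The overlay must fit inside the control buffer; the kernel
         * enforces the equivalent invariant at build time. */
        assert(sizeof(struct ovs_cb_stub) <= sizeof(skb.cb));

        OVS_CB_STUB(&skb)->egress_tun_key = (void *)0;  /* as the receive path now does */
        return 0;
}

Keeping the flow key out of the cb and passing it as an explicit argument is what frees the space for input_vport here.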
*/ -int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) +static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) { int error; struct ethhdr *eth; - memset(key, 0, sizeof(*key)); - - key->phy.priority = skb->priority; - if (OVS_CB(skb)->tun_key) - memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key)); - key->phy.in_port = in_port; - key->phy.skb_mark = skb->mark; - skb_reset_mac_header(skb); /* Link layer. We are guaranteed to have at least the 14 byte Ethernet @@ -610,6 +603,40 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) } } } - return 0; } + +int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) +{ + return key_extract(skb, key); +} + +int ovs_flow_key_extract(struct ovs_key_ipv4_tunnel *tun_key, + struct sk_buff *skb, struct sw_flow_key *key) +{ + /* Extract metadata from packet. */ + memset(key, 0, sizeof(*key)); + if (tun_key) + memcpy(&key->tun_key, tun_key, sizeof(key->tun_key)); + + key->phy.priority = skb->priority; + key->phy.in_port = OVS_CB(skb)->input_vport->port_no; + key->phy.skb_mark = skb->mark; + + return key_extract(skb, key); +} + +int ovs_flow_key_extract_userspace(const struct nlattr *attr, + struct sk_buff *skb, + struct sw_flow_key *key) +{ + int err; + + memset(key, 0, sizeof(*key)); + /* Extract metadata from netlink attributes. */ + err = ovs_nla_get_flow_metadata(attr, key); + if (err) + return err; + + return key_extract(skb, key); +} diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 5e5aaed3a85b..0f5db4ec565d 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -72,6 +72,8 @@ struct sw_flow_key { u32 skb_mark; /* SKB mark. */ u16 in_port; /* Input switch port (or DP_MAX_PORTS). */ } __packed phy; /* Safe when right after 'tun_key'. */ + u32 ovs_flow_hash; /* Datapath computed hash value. */ + u32 recirc_id; /* Recirculation ID. */ struct { u8 src[ETH_ALEN]; /* Ethernet source address. */ u8 dst[ETH_ALEN]; /* Ethernet destination address. */ @@ -187,6 +189,12 @@ void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, void ovs_flow_stats_clear(struct sw_flow *); u64 ovs_flow_used_time(unsigned long flow_jiffies); -int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *); +int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key); +int ovs_flow_key_extract(struct ovs_key_ipv4_tunnel *tun_key, + struct sk_buff *skb, struct sw_flow_key *key); +/* Extract key from packet coming from userspace. */ +int ovs_flow_key_extract_userspace(const struct nlattr *attr, + struct sk_buff *skb, + struct sw_flow_key *key); #endif /* flow.h */ diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index d757848da89c..f4c8daa73965 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. 
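The net effect of the refactor above is one packet parser with three thin wrappers that differ only in where the metadata comes from: the receive path, netlink attributes, or nothing at all when re-parsing after recirculation. A compact model of that shape in plain C, with illustrative names and a single int standing in for the parsed packet fields:

#include <stdio.h>
#include <string.h>

struct key {
        int in_port;
        int mark;
        int eth_type;   /* stand-in for the packet-derived fields */
};

/* shared packet-only parsing, the key_extract() of the patch */
static int parse_packet(const char *pkt, struct key *key)
{
        key->eth_type = pkt[0];
        return 0;
}

static int key_extract_rx(const char *pkt, int in_port, int mark,
                          struct key *key)
{
        memset(key, 0, sizeof(*key));   /* metadata from the receive path */
        key->in_port = in_port;
        key->mark = mark;
        return parse_packet(pkt, key);
}

static int key_extract_userspace(const char *pkt, const struct key *meta,
                                 struct key *key)
{
        memset(key, 0, sizeof(*key));   /* metadata from netlink attributes */
        key->in_port = meta->in_port;
        key->mark = meta->mark;
        return parse_packet(pkt, key);
}

static int key_update(const char *pkt, struct key *key)
{
        /* re-parse after an action rewrote the packet; metadata is kept */
        return parse_packet(pkt, key);
}

int main(void)
{
        struct key k;

        key_extract_rx("\x08", 3, 0, &k);
        printf("port %d type %d\n", k.in_port, k.eth_type);
        key_update("\x06", &k);
        printf("port %d type %d\n", k.in_port, k.eth_type);
        return 0;
}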
* * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -251,6 +251,8 @@ static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), + [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32), + [OVS_KEY_ATTR_DP_HASH] = sizeof(u32), [OVS_KEY_ATTR_TUNNEL] = -1, }; @@ -454,6 +456,20 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb, static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, const struct nlattr **a, bool is_mask) { + if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) { + u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]); + + SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask); + *attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH); + } + + if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) { + u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]); + + SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask); + *attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID); + } + if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) { SW_FLOW_KEY_PUT(match, phy.priority, nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask); @@ -836,7 +852,7 @@ int ovs_nla_get_match(struct sw_flow_match *match, /** * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key. - * @flow: Receives extracted in_port, priority, tun_key and skb_mark. + * @key: Receives extracted in_port, priority, tun_key and skb_mark. * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute * sequence. * @@ -846,32 +862,24 @@ int ovs_nla_get_match(struct sw_flow_match *match, * extracted from the packet itself. */ -int ovs_nla_get_flow_metadata(struct sw_flow *flow, - const struct nlattr *attr) +int ovs_nla_get_flow_metadata(const struct nlattr *attr, + struct sw_flow_key *key) { - struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; + struct sw_flow_match match; u64 attrs = 0; int err; - struct sw_flow_match match; - - flow->key.phy.in_port = DP_MAX_PORTS; - flow->key.phy.priority = 0; - flow->key.phy.skb_mark = 0; - memset(tun_key, 0, sizeof(flow->key.tun_key)); err = parse_flow_nlattrs(attr, a, &attrs); if (err) return -EINVAL; memset(&match, 0, sizeof(match)); - match.key = &flow->key; + match.key = key; - err = metadata_from_nlattrs(&match, &attrs, a, false); - if (err) - return err; + key->phy.in_port = DP_MAX_PORTS; - return 0; + return metadata_from_nlattrs(&match, &attrs, a, false); } int ovs_nla_put_flow(const struct sw_flow_key *swkey, @@ -881,6 +889,12 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey, struct nlattr *nla, *encap; bool is_mask = (swkey != output); + if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id)) + goto nla_put_failure; + + if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash)) + goto nla_put_failure; + if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) goto nla_put_failure; @@ -1409,11 +1423,13 @@ int ovs_nla_copy_actions(const struct nlattr *attr, /* Expected argument lengths, (u32)-1 for variable length. 
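The action_lens[] table that follows drives a simple table-driven check: a fixed-size attribute must match its expected payload exactly, while (u32)-1 marks attributes whose length is validated by type-specific code instead. A self-contained sketch of that check, with hypothetical attribute IDs:

#include <stdint.h>
#include <stdio.h>

#define ATTR_OUTPUT 0   /* hypothetical attribute IDs, not the kernel's */
#define ATTR_SET    1
#define ATTR_MAX    1

static const uint32_t attr_lens[ATTR_MAX + 1] = {
        [ATTR_OUTPUT] = sizeof(uint32_t),       /* fixed size */
        [ATTR_SET]    = (uint32_t)-1,           /* variable length */
};

static int validate_attr(int type, uint32_t payload_len)
{
        uint32_t expected;

        if (type < 0 || type > ATTR_MAX)
                return -1;
        expected = attr_lens[type];
        /* Variable-length attributes are checked by type-specific code;
         * fixed-size ones must match exactly. */
        if (expected != (uint32_t)-1 && payload_len != expected)
                return -1;
        return 0;
}

int main(void)
{
        printf("%d\n", validate_attr(ATTR_OUTPUT, 4));  /* 0: ok */
        printf("%d\n", validate_attr(ATTR_OUTPUT, 2));  /* -1: bad length */
        return 0;
}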
*/ static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), + [OVS_ACTION_ATTR_RECIRC] = sizeof(u32), [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), [OVS_ACTION_ATTR_POP_VLAN] = 0, [OVS_ACTION_ATTR_SET] = (u32)-1, - [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 + [OVS_ACTION_ATTR_SAMPLE] = (u32)-1, + [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash) }; const struct ovs_action_push_vlan *vlan; int type = nla_type(a); @@ -1440,6 +1456,18 @@ int ovs_nla_copy_actions(const struct nlattr *attr, return -EINVAL; break; + case OVS_ACTION_ATTR_HASH: { + const struct ovs_action_hash *act_hash = nla_data(a); + + switch (act_hash->hash_alg) { + case OVS_HASH_ALG_L4: + break; + default: + return -EINVAL; + } + + break; + } case OVS_ACTION_ATTR_POP_VLAN: break; @@ -1452,6 +1480,9 @@ int ovs_nla_copy_actions(const struct nlattr *attr, return -EINVAL; break; + case OVS_ACTION_ATTR_RECIRC: + break; + case OVS_ACTION_ATTR_SET: err = validate_set(a, key, sfa, &skip_copy); if (err) diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h index 440151045d39..206e45add888 100644 --- a/net/openvswitch/flow_netlink.h +++ b/net/openvswitch/flow_netlink.h @@ -42,8 +42,8 @@ void ovs_match_init(struct sw_flow_match *match, int ovs_nla_put_flow(const struct sw_flow_key *, const struct sw_flow_key *, struct sk_buff *); -int ovs_nla_get_flow_metadata(struct sw_flow *flow, - const struct nlattr *attr); +int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *); + int ovs_nla_get_match(struct sw_flow_match *match, const struct nlattr *, const struct nlattr *); diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index f49148a07da2..309cca6e816f 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. 
* * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -63,7 +63,7 @@ static __be16 filter_tnl_flags(__be16 flags) static struct sk_buff *__build_header(struct sk_buff *skb, int tunnel_hlen) { - const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key; + const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->egress_tun_key; struct tnl_ptk_info tpi; skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM)); @@ -129,6 +129,7 @@ static int gre_err(struct sk_buff *skb, u32 info, static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) { struct net *net = ovs_dp_get_net(vport->dp); + struct ovs_key_ipv4_tunnel *tun_key; struct flowi4 fl; struct rtable *rt; int min_headroom; @@ -136,16 +137,17 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) __be16 df; int err; - if (unlikely(!OVS_CB(skb)->tun_key)) { + if (unlikely(!OVS_CB(skb)->egress_tun_key)) { err = -EINVAL; goto error; } + tun_key = OVS_CB(skb)->egress_tun_key; /* Route lookup */ memset(&fl, 0, sizeof(fl)); - fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst; - fl.saddr = OVS_CB(skb)->tun_key->ipv4_src; - fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos); + fl.daddr = tun_key->ipv4_dst; + fl.saddr = tun_key->ipv4_src; + fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos); fl.flowi4_mark = skb->mark; fl.flowi4_proto = IPPROTO_GRE; @@ -153,7 +155,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) if (IS_ERR(rt)) return PTR_ERR(rt); - tunnel_hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags); + tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags); min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len + tunnel_hlen + sizeof(struct iphdr) @@ -185,15 +187,14 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) goto err_free_rt; } - df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? + df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; skb->ignore_df = 1; return iptunnel_xmit(skb->sk, rt, skb, fl.saddr, - OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE, - OVS_CB(skb)->tun_key->ipv4_tos, - OVS_CB(skb)->tun_key->ipv4_ttl, df, false); + tun_key->ipv4_dst, IPPROTO_GRE, + tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false); err_free_rt: ip_rt_put(rt); error: diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index d8b7e247bebf..f19539bb8adc 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Nicira, Inc. + * Copyright (c) 2014 Nicira, Inc. * Copyright (c) 2013 Cisco Systems, Inc. 
* * This program is free software; you can redistribute it and/or @@ -140,22 +140,24 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) struct net *net = ovs_dp_get_net(vport->dp); struct vxlan_port *vxlan_port = vxlan_vport(vport); __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; + struct ovs_key_ipv4_tunnel *tun_key; struct rtable *rt; struct flowi4 fl; __be16 src_port; __be16 df; int err; - if (unlikely(!OVS_CB(skb)->tun_key)) { + if (unlikely(!OVS_CB(skb)->egress_tun_key)) { err = -EINVAL; goto error; } + tun_key = OVS_CB(skb)->egress_tun_key; /* Route lookup */ memset(&fl, 0, sizeof(fl)); - fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst; - fl.saddr = OVS_CB(skb)->tun_key->ipv4_src; - fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos); + fl.daddr = tun_key->ipv4_dst; + fl.saddr = tun_key->ipv4_src; + fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos); fl.flowi4_mark = skb->mark; fl.flowi4_proto = IPPROTO_UDP; @@ -165,7 +167,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) goto error; } - df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? + df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; skb->ignore_df = 1; @@ -173,11 +175,10 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb) src_port = udp_flow_src_port(net, skb, 0, 0, true); err = vxlan_xmit_skb(vxlan_port->vs, rt, skb, - fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst, - OVS_CB(skb)->tun_key->ipv4_tos, - OVS_CB(skb)->tun_key->ipv4_ttl, df, + fl.saddr, tun_key->ipv4_dst, + tun_key->ipv4_tos, tun_key->ipv4_ttl, df, src_port, dst_port, - htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8), + htonl(be64_to_cpu(tun_key->tun_id) << 8), false); if (err < 0) ip_rt_put(rt); diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 6d8f2ec481d9..5df8377fcfb1 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2012 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -148,8 +148,6 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, return ERR_PTR(-ENOMEM); } - spin_lock_init(&vport->stats_lock); - return vport; } @@ -268,14 +266,10 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) * netdev-stats can be directly read over netlink-ioctl. 
*/ - spin_lock_bh(&vport->stats_lock); - - stats->rx_errors = vport->err_stats.rx_errors; - stats->tx_errors = vport->err_stats.tx_errors; - stats->tx_dropped = vport->err_stats.tx_dropped; - stats->rx_dropped = vport->err_stats.rx_dropped; - - spin_unlock_bh(&vport->stats_lock); + stats->rx_errors = atomic_long_read(&vport->err_stats.rx_errors); + stats->tx_errors = atomic_long_read(&vport->err_stats.tx_errors); + stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped); + stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped); for_each_possible_cpu(i) { const struct pcpu_sw_netstats *percpu_stats; @@ -441,6 +435,8 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, struct ovs_key_ipv4_tunnel *tun_key) { struct pcpu_sw_netstats *stats; + struct sw_flow_key key; + int error; stats = this_cpu_ptr(vport->percpu_stats); u64_stats_update_begin(&stats->syncp); @@ -448,8 +444,15 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, stats->rx_bytes += skb->len; u64_stats_update_end(&stats->syncp); - OVS_CB(skb)->tun_key = tun_key; - ovs_dp_process_received_packet(vport, skb); + OVS_CB(skb)->input_vport = vport; + OVS_CB(skb)->egress_tun_key = NULL; + /* Extract flow from 'skb' into 'key'. */ + error = ovs_flow_key_extract(tun_key, skb, &key); + if (unlikely(error)) { + kfree_skb(skb); + return; + } + ovs_dp_process_packet(skb, &key); } /** @@ -495,27 +498,24 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb) static void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type) { - spin_lock(&vport->stats_lock); - switch (err_type) { case VPORT_E_RX_DROPPED: - vport->err_stats.rx_dropped++; + atomic_long_inc(&vport->err_stats.rx_dropped); break; case VPORT_E_RX_ERROR: - vport->err_stats.rx_errors++; + atomic_long_inc(&vport->err_stats.rx_errors); break; case VPORT_E_TX_DROPPED: - vport->err_stats.tx_dropped++; + atomic_long_inc(&vport->err_stats.tx_dropped); break; case VPORT_E_TX_ERROR: - vport->err_stats.tx_errors++; + atomic_long_inc(&vport->err_stats.tx_errors); break; } - spin_unlock(&vport->stats_lock); } static void free_vport_rcu(struct rcu_head *rcu) diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 35f89d84b45e..0d95b9f5f9c4 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -62,10 +62,10 @@ int ovs_vport_send(struct vport *, struct sk_buff *); /* The following definitions are for implementers of vport devices: */ struct vport_err_stats { - u64 rx_dropped; - u64 rx_errors; - u64 tx_dropped; - u64 tx_errors; + atomic_long_t rx_dropped; + atomic_long_t rx_errors; + atomic_long_t tx_dropped; + atomic_long_t tx_errors; }; /** * struct vport_portids - array of netlink portids of a vport. @@ -93,7 +93,6 @@ struct vport_portids { * @dp_hash_node: Element in @datapath->ports hash table in datapath.c. * @ops: Class structure. 
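Dropping stats_lock above is safe because each error counter is now an independent atomic_long_t: datapath increments and netlink-side reads need only per-counter atomicity, not mutual exclusion. What is given up is a consistent snapshot across the four counters, an acceptable trade for error statistics. A user-space sketch of the same trade using C11 atomics (illustrative names):

#include <stdatomic.h>
#include <stdio.h>

/* Mirrors vport_err_stats after the patch: four independent counters and
 * no lock. A reader may observe the counters at slightly different
 * instants, which is fine for statistics. */
struct err_stats {
        atomic_long rx_dropped;
        atomic_long rx_errors;
        atomic_long tx_dropped;
        atomic_long tx_errors;
};

static void record_rx_error(struct err_stats *s)
{
        /* the moral equivalent of atomic_long_inc(&err_stats.rx_errors) */
        atomic_fetch_add_explicit(&s->rx_errors, 1, memory_order_relaxed);
}

int main(void)
{
        struct err_stats s = { 0 };

        record_rx_error(&s);
        printf("rx_errors=%ld\n",
               atomic_load_explicit(&s.rx_errors, memory_order_relaxed));
        return 0;
}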
* @percpu_stats: Points to per-CPU statistics used and maintained by vport - * @stats_lock: Protects @err_stats; * @err_stats: Points to error statistics used and maintained by vport */ struct vport { @@ -108,7 +107,6 @@ struct vport { struct pcpu_sw_netstats __percpu *percpu_stats; - spinlock_t stats_lock; struct vport_err_stats err_stats; }; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 93896d2092f6..87d20f48ff06 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -240,11 +240,9 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po); static int packet_direct_xmit(struct sk_buff *skb) { struct net_device *dev = skb->dev; - const struct net_device_ops *ops = dev->netdev_ops; netdev_features_t features; struct netdev_queue *txq; int ret = NETDEV_TX_BUSY; - u16 queue_map; if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev))) @@ -255,17 +253,13 @@ static int packet_direct_xmit(struct sk_buff *skb) __skb_linearize(skb)) goto drop; - queue_map = skb_get_queue_mapping(skb); - txq = netdev_get_tx_queue(dev, queue_map); + txq = skb_get_tx_queue(dev, skb); local_bh_disable(); HARD_TX_LOCK(dev, txq, smp_processor_id()); - if (!netif_xmit_frozen_or_drv_stopped(txq)) { - ret = ops->ndo_start_xmit(skb, dev); - if (ret == NETDEV_TX_OK) - txq_trans_update(txq); - } + if (!netif_xmit_frozen_or_drv_stopped(txq)) + ret = netdev_start_xmit(skb, dev, txq, false); HARD_TX_UNLOCK(dev, txq); local_bh_enable(); diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c index bc5514211b0c..e873d7d9f857 100644 --- a/net/rose/rose_link.c +++ b/net/rose/rose_link.c @@ -160,7 +160,8 @@ void rose_link_rx_restart(struct sk_buff *skb, struct rose_neigh *neigh, unsigne break; case ROSE_DIAGNOSTIC: - printk(KERN_WARNING "ROSE: received diagnostic #%d - %02X %02X %02X\n", skb->data[3], skb->data[4], skb->data[5], skb->data[6]); + pr_warn("ROSE: received diagnostic #%d - %3ph\n", skb->data[3], + skb->data + 4); break; default: diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c index db57458c824c..74c0fcd36838 100644 --- a/net/rxrpc/ar-error.c +++ b/net/rxrpc/ar-error.c @@ -37,7 +37,7 @@ void rxrpc_UDP_error_report(struct sock *sk) _enter("%p{%d}", sk, local->debug_id); - skb = skb_dequeue(&sk->sk_error_queue); + skb = sock_dequeue_err_skb(sk); if (!skb) { _leave("UDP socket errqueue empty"); return; @@ -111,18 +111,6 @@ void rxrpc_UDP_error_report(struct sock *sk) skb_queue_tail(&trans->error_queue, skb); rxrpc_queue_work(&trans->error_handler); - /* reset and regenerate socket error */ - spin_lock_bh(&sk->sk_error_queue.lock); - sk->sk_err = 0; - skb = skb_peek(&sk->sk_error_queue); - if (skb) { - sk->sk_err = SKB_EXT_ERR(skb)->ee.ee_errno; - spin_unlock_bh(&sk->sk_error_queue.lock); - sk->sk_error_report(sk); - } else { - spin_unlock_bh(&sk->sk_error_queue.lock); - } - _leave(""); } diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 63b21e580de9..481f89f93789 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c @@ -45,7 +45,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, struct rxrpc_skb_priv *sp; struct rxrpc_sock *rx = call->socket; struct sock *sk; - int skb_len, ret; + int ret; _enter(",,%d,%d", force, terminal); @@ -101,13 +101,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, rx->interceptor(sk, call->user_call_ID, skb); spin_unlock_bh(&sk->sk_receive_queue.lock); } else { - - /* Cache the SKB length before we tack it onto the - * receive queue. 
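The rxrpc change above leans on sock_dequeue_err_skb() doing atomically what the deleted block hand-rolled: pop one error skb and re-arm sk_err from whatever is queued next. A small model of that consolidated pop-and-rearm step, in plain C with illustrative names and integer error codes standing in for queued skbs:

#include <stdio.h>

#define QLEN 8

struct err_queue {
        int err[QLEN];
        int head, tail;
        int sk_err;     /* the "current" socket error, like sk->sk_err */
};

static void push_err(struct err_queue *q, int e)
{
        q->err[q->tail++ % QLEN] = e;
        q->sk_err = q->err[q->head % QLEN];
}

static int dequeue_err(struct err_queue *q)
{
        int e;

        if (q->head == q->tail)
                return 0;
        e = q->err[q->head++ % QLEN];
        /* re-arm from the next queued error, or clear when drained */
        q->sk_err = (q->head == q->tail) ? 0 : q->err[q->head % QLEN];
        return e;
}

int main(void)
{
        struct err_queue q = { { 0 }, 0, 0, 0 };

        push_err(&q, 111);      /* e.g. ECONNREFUSED */
        push_err(&q, 110);      /* e.g. ETIMEDOUT */
        while (q.sk_err)
                printf("popped %d, pending %d\n", dequeue_err(&q), q.sk_err);
        return 0;
}

Folding the re-arm into the dequeue is what lets callers such as rxrpc_UDP_error_report() drop their copy of the locking dance.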
Once it is added it no longer - * belongs to us and may be freed by other threads of - * control pulling packets from the queue */ - skb_len = skb->len; - _net("post skb %p", skb); __skb_queue_tail(&sk->sk_receive_queue, skb); spin_unlock_bh(&sk->sk_receive_queue.lock); diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 0566e4606a4a..f32bcb094915 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -231,7 +231,7 @@ override: if (ret != ACT_P_CREATED) return ret; - police->tcfp_t_c = ktime_to_ns(ktime_get()); + police->tcfp_t_c = ktime_get_ns(); police->tcf_index = parm->index ? parm->index : tcf_hash_new_index(hinfo); h = tcf_hash(police->tcf_index, POL_TAB_MASK); @@ -279,7 +279,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a, return police->tcfp_result; } - now = ktime_to_ns(ktime_get()); + now = ktime_get_ns(); toks = min_t(s64, now - police->tcfp_t_c, police->tcfp_burst); if (police->peak_present) { diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index c28b0d327b12..e547efdaba9d 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -117,7 +117,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; - spinlock_t *root_lock; struct tcmsg *t; u32 protocol; u32 prio; @@ -125,7 +124,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) u32 parent; struct net_device *dev; struct Qdisc *q; - struct tcf_proto **back, **chain; + struct tcf_proto __rcu **back; + struct tcf_proto __rcu **chain; struct tcf_proto *tp; const struct tcf_proto_ops *tp_ops; const struct Qdisc_class_ops *cops; @@ -197,7 +197,9 @@ replay: goto errout; /* Check the chain for existence of proto-tcf with this priority */ - for (back = chain; (tp = *back) != NULL; back = &tp->next) { + for (back = chain; + (tp = rtnl_dereference(*back)) != NULL; + back = &tp->next) { if (tp->prio >= prio) { if (tp->prio == prio) { if (!nprio || @@ -209,8 +211,6 @@ replay: } } - root_lock = qdisc_root_sleeping_lock(q); - if (tp == NULL) { /* Proto-tcf does not exist, create new one */ @@ -259,7 +259,8 @@ replay: } tp->ops = tp_ops; tp->protocol = protocol; - tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back)); + tp->prio = nprio ? : + TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back))); tp->q = q; tp->classify = tp_ops->classify; tp->classid = parent; @@ -280,9 +281,9 @@ replay: if (fh == 0) { if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { - spin_lock_bh(root_lock); - *back = tp->next; - spin_unlock_bh(root_lock); + struct tcf_proto *next = rtnl_dereference(tp->next); + + RCU_INIT_POINTER(*back, next); tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER); tcf_destroy(tp); @@ -322,10 +323,8 @@ replay: n->nlmsg_flags & NLM_F_CREATE ? 
TCA_ACT_NOREPLACE : TCA_ACT_REPLACE); if (err == 0) { if (tp_created) { - spin_lock_bh(root_lock); - tp->next = *back; - *back = tp; - spin_unlock_bh(root_lock); + RCU_INIT_POINTER(tp->next, rtnl_dereference(*back)); + rcu_assign_pointer(*back, tp); } tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER); } else { @@ -420,7 +419,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) int s_t; struct net_device *dev; struct Qdisc *q; - struct tcf_proto *tp, **chain; + struct tcf_proto *tp, __rcu **chain; struct tcmsg *tcm = nlmsg_data(cb->nlh); unsigned long cl = 0; const struct Qdisc_class_ops *cops; @@ -454,7 +453,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; - for (tp = *chain, t = 0; tp; tp = tp->next, t++) { + for (tp = rtnl_dereference(*chain), t = 0; + tp; tp = rtnl_dereference(tp->next), t++) { if (t < s_t) continue; if (TC_H_MAJ(tcm->tcm_info) && diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 0ae1813e3e90..1937298d6775 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -24,6 +24,7 @@ struct basic_head { u32 hgenerator; struct list_head flist; + struct rcu_head rcu; }; struct basic_filter { @@ -31,17 +32,19 @@ struct basic_filter { struct tcf_exts exts; struct tcf_ematch_tree ematches; struct tcf_result res; + struct tcf_proto *tp; struct list_head link; + struct rcu_head rcu; }; static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { int r; - struct basic_head *head = tp->root; + struct basic_head *head = rcu_dereference_bh(tp->root); struct basic_filter *f; - list_for_each_entry(f, &head->flist, link) { + list_for_each_entry_rcu(f, &head->flist, link) { if (!tcf_em_tree_match(skb, &f->ematches, NULL)) continue; *res = f->res; @@ -56,7 +59,7 @@ static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, static unsigned long basic_get(struct tcf_proto *tp, u32 handle) { unsigned long l = 0UL; - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f; if (head == NULL) @@ -81,12 +84,15 @@ static int basic_init(struct tcf_proto *tp) if (head == NULL) return -ENOBUFS; INIT_LIST_HEAD(&head->flist); - tp->root = head; + rcu_assign_pointer(tp->root, head); return 0; } -static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f) +static void basic_delete_filter(struct rcu_head *head) { + struct basic_filter *f = container_of(head, struct basic_filter, rcu); + struct tcf_proto *tp = f->tp; + tcf_unbind_filter(tp, &f->res); tcf_exts_destroy(tp, &f->exts); tcf_em_tree_destroy(tp, &f->ematches); @@ -95,27 +101,26 @@ static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f) static void basic_destroy(struct tcf_proto *tp) { - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f, *n; list_for_each_entry_safe(f, n, &head->flist, link) { - list_del(&f->link); - basic_delete_filter(tp, f); + list_del_rcu(&f->link); + call_rcu(&f->rcu, basic_delete_filter); } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static int basic_delete(struct tcf_proto *tp, unsigned long arg) { - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *t, *f = (struct basic_filter *) arg; list_for_each_entry(t, &head->flist, link) if (t == f) { - tcf_tree_lock(tp); - list_del(&t->link); - tcf_tree_unlock(tp); - 
basic_delete_filter(tp, t); + list_del_rcu(&t->link); + call_rcu(&t->rcu, basic_delete_filter); return 0; } @@ -152,6 +157,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp, tcf_exts_change(tp, &f->exts, &e); tcf_em_tree_change(tp, &f->ematches, &t); + f->tp = tp; return 0; errout: @@ -164,9 +170,10 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, struct nlattr **tca, unsigned long *arg, bool ovr) { int err; - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct nlattr *tb[TCA_BASIC_MAX + 1]; - struct basic_filter *f = (struct basic_filter *) *arg; + struct basic_filter *fold = (struct basic_filter *) *arg; + struct basic_filter *fnew; if (tca[TCA_OPTIONS] == NULL) return -EINVAL; @@ -176,22 +183,23 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; - if (f != NULL) { - if (handle && f->handle != handle) + if (fold != NULL) { + if (handle && fold->handle != handle) return -EINVAL; - return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); } err = -ENOBUFS; - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (f == NULL) + fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); + if (fnew == NULL) goto errout; - tcf_exts_init(&f->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE); + tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE); err = -EINVAL; - if (handle) - f->handle = handle; - else { + if (handle) { + fnew->handle = handle; + } else if (fold) { + fnew->handle = fold->handle; + } else { unsigned int i = 0x80000000; do { if (++head->hgenerator == 0x7FFFFFFF) @@ -203,29 +211,31 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, goto errout; } - f->handle = head->hgenerator; + fnew->handle = head->hgenerator; } - err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr); + err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr); if (err < 0) goto errout; - tcf_tree_lock(tp); - list_add(&f->link, &head->flist); - tcf_tree_unlock(tp); - *arg = (unsigned long) f; + *arg = (unsigned long)fnew; + + if (fold) { + list_replace_rcu(&fold->link, &fnew->link); + call_rcu(&fold->rcu, basic_delete_filter); + } else { + list_add_rcu(&fnew->link, &head->flist); + } return 0; errout: - if (*arg == 0UL && f) - kfree(f); - + kfree(fnew); return err; } static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct basic_head *head = tp->root; + struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f; list_for_each_entry(f, &head->flist, link) { diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 0e30d58149da..4e3f5bfc0b26 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -27,6 +27,7 @@ MODULE_DESCRIPTION("TC BPF based classifier"); struct cls_bpf_head { struct list_head plist; u32 hgen; + struct rcu_head rcu; }; struct cls_bpf_prog { @@ -37,6 +38,8 @@ struct cls_bpf_prog { struct list_head link; u32 handle; u16 bpf_len; + struct tcf_proto *tp; + struct rcu_head rcu; }; static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { @@ -49,11 +52,11 @@ static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rcu_dereference_bh(tp->root); struct cls_bpf_prog *prog; int ret; - list_for_each_entry(prog, &head->plist, link) { + list_for_each_entry_rcu(prog, &head->plist, link) { int filter_res = BPF_PROG_RUN(prog->filter, skb); if 
(filter_res == 0) @@ -81,8 +84,8 @@ static int cls_bpf_init(struct tcf_proto *tp) if (head == NULL) return -ENOBUFS; - INIT_LIST_HEAD(&head->plist); - tp->root = head; + INIT_LIST_HEAD_RCU(&head->plist); + rcu_assign_pointer(tp->root, head); return 0; } @@ -98,18 +101,22 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog) kfree(prog); } +static void __cls_bpf_delete_prog(struct rcu_head *rcu) +{ + struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu); + + cls_bpf_delete_prog(prog->tp, prog); +} + static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg; list_for_each_entry(prog, &head->plist, link) { if (prog == todel) { - tcf_tree_lock(tp); - list_del(&prog->link); - tcf_tree_unlock(tp); - - cls_bpf_delete_prog(tp, prog); + list_del_rcu(&prog->link); + call_rcu(&prog->rcu, __cls_bpf_delete_prog); return 0; } } @@ -119,27 +126,28 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg) static void cls_bpf_destroy(struct tcf_proto *tp) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog, *tmp; list_for_each_entry_safe(prog, tmp, &head->plist, link) { - list_del(&prog->link); - cls_bpf_delete_prog(tp, prog); + list_del_rcu(&prog->link); + call_rcu(&prog->rcu, __cls_bpf_delete_prog); } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog; unsigned long ret = 0UL; if (head == NULL) return 0UL; - list_for_each_entry(prog, &head->plist, link) { + list_for_each_entry_rcu(prog, &head->plist, link) { if (prog->handle == handle) { ret = (unsigned long) prog; break; @@ -158,10 +166,10 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, unsigned long base, struct nlattr **tb, struct nlattr *est, bool ovr) { - struct sock_filter *bpf_ops, *bpf_old; + struct sock_filter *bpf_ops; struct tcf_exts exts; struct sock_fprog_kern tmp; - struct bpf_prog *fp, *fp_old; + struct bpf_prog *fp; u16 bpf_size, bpf_len; u32 classid; int ret; @@ -197,26 +205,15 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, if (ret) goto errout_free; - tcf_tree_lock(tp); - fp_old = prog->filter; - bpf_old = prog->bpf_ops; - prog->bpf_len = bpf_len; prog->bpf_ops = bpf_ops; prog->filter = fp; prog->res.classid = classid; - tcf_tree_unlock(tp); tcf_bind_filter(tp, &prog->res, base); tcf_exts_change(tp, &prog->exts, &exts); - if (fp_old) - bpf_prog_destroy(fp_old); - if (bpf_old) - kfree(bpf_old); - return 0; - errout_free: kfree(bpf_ops); errout: @@ -244,9 +241,10 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, u32 handle, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct cls_bpf_head *head = tp->root; - struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg; + struct cls_bpf_head *head = rtnl_dereference(tp->root); + struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg; struct nlattr *tb[TCA_BPF_MAX + 1]; + struct cls_bpf_prog *prog; int ret; if (tca[TCA_OPTIONS] == NULL) @@ -256,18 +254,19 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, if (ret < 0) return ret; - if (prog != NULL) { - if (handle && 
prog->handle != handle) - return -EINVAL; - return cls_bpf_modify_existing(net, tp, prog, base, tb, - tca[TCA_RATE], ovr); - } - prog = kzalloc(sizeof(*prog), GFP_KERNEL); - if (prog == NULL) + if (!prog) return -ENOBUFS; tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE); + + if (oldprog) { + if (handle && oldprog->handle != handle) { + ret = -EINVAL; + goto errout; + } + } + if (handle == 0) prog->handle = cls_bpf_grab_new_handle(tp, head); else @@ -281,16 +280,17 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, if (ret < 0) goto errout; - tcf_tree_lock(tp); - list_add(&prog->link, &head->plist); - tcf_tree_unlock(tp); + if (oldprog) { + list_replace_rcu(&oldprog->link, &prog->link); + call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); + } else { + list_add_rcu(&prog->link, &head->plist); + } *arg = (unsigned long) prog; - return 0; errout: - if (*arg == 0UL && prog) - kfree(prog); + kfree(prog); return ret; } @@ -339,10 +339,10 @@ nla_put_failure: static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct cls_bpf_head *head = tp->root; + struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog; - list_for_each_entry(prog, &head->plist, link) { + list_for_each_entry_rcu(prog, &head->plist, link) { if (arg->count < arg->skip) goto skip; if (arg->fn(tp, (unsigned long) prog, arg) < 0) { diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index cacf01bd04f0..15c34d4ccd9e 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -22,17 +22,17 @@ struct cls_cgroup_head { u32 handle; struct tcf_exts exts; struct tcf_ematch_tree ematches; + struct tcf_proto *tp; + struct rcu_head rcu; }; static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rcu_dereference_bh(tp->root); u32 classid; - rcu_read_lock(); classid = task_cls_state(current)->classid; - rcu_read_unlock(); /* * Due to the nature of the classifier it is required to ignore all @@ -80,13 +80,25 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = { [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, }; +static void cls_cgroup_destroy_rcu(struct rcu_head *root) +{ + struct cls_cgroup_head *head = container_of(root, + struct cls_cgroup_head, + rcu); + + tcf_exts_destroy(head->tp, &head->exts); + tcf_em_tree_destroy(head->tp, &head->ematches); + kfree(head); +} + static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg, bool ovr) { struct nlattr *tb[TCA_CGROUP_MAX + 1]; - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rtnl_dereference(tp->root); + struct cls_cgroup_head *new; struct tcf_ematch_tree t; struct tcf_exts e; int err; @@ -94,53 +106,60 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, if (!tca[TCA_OPTIONS]) return -EINVAL; - if (head == NULL) { - if (!handle) - return -EINVAL; - - head = kzalloc(sizeof(*head), GFP_KERNEL); - if (head == NULL) - return -ENOBUFS; + if (!head && !handle) + return -EINVAL; - tcf_exts_init(&head->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); - head->handle = handle; + if (head && handle != head->handle) + return -ENOENT; - tcf_tree_lock(tp); - tp->root = head; - tcf_tree_unlock(tp); - } + new = kzalloc(sizeof(*head), GFP_KERNEL); + if (!new) + return -ENOBUFS; - if (handle != head->handle) - return -ENOENT; + 
tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); + if (head) + new->handle = head->handle; + else + new->handle = handle; + new->tp = tp; err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS], cgroup_policy); if (err < 0) - return err; + goto errout; tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); if (err < 0) - return err; + goto errout; err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t); - if (err < 0) - return err; + if (err < 0) { + tcf_exts_destroy(tp, &e); + goto errout; + } - tcf_exts_change(tp, &head->exts, &e); - tcf_em_tree_change(tp, &head->ematches, &t); + tcf_exts_change(tp, &new->exts, &e); + tcf_em_tree_change(tp, &new->ematches, &t); + rcu_assign_pointer(tp->root, new); + if (head) + call_rcu(&head->rcu, cls_cgroup_destroy_rcu); return 0; +errout: + kfree(new); + return err; } static void cls_cgroup_destroy(struct tcf_proto *tp) { - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rtnl_dereference(tp->root); if (head) { tcf_exts_destroy(tp, &head->exts); tcf_em_tree_destroy(tp, &head->ematches); - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } } @@ -151,7 +170,7 @@ static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg) static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rtnl_dereference(tp->root); if (arg->count < arg->skip) goto skip; @@ -167,7 +186,7 @@ skip: static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { - struct cls_cgroup_head *head = tp->root; + struct cls_cgroup_head *head = rtnl_dereference(tp->root); unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 35be16f7c192..95736fa479f3 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -34,12 +34,14 @@ struct flow_head { struct list_head filters; + struct rcu_head rcu; }; struct flow_filter { struct list_head list; struct tcf_exts exts; struct tcf_ematch_tree ematches; + struct tcf_proto *tp; struct timer_list perturb_timer; u32 perturb_period; u32 handle; @@ -54,6 +56,7 @@ struct flow_filter { u32 divisor; u32 baseclass; u32 hashrnd; + struct rcu_head rcu; }; static inline u32 addr_fold(void *addr) @@ -276,14 +279,14 @@ static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow) static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct flow_head *head = tp->root; + struct flow_head *head = rcu_dereference_bh(tp->root); struct flow_filter *f; u32 keymask; u32 classid; unsigned int n, key; int r; - list_for_each_entry(f, &head->filters, list) { + list_for_each_entry_rcu(f, &head->filters, list) { u32 keys[FLOW_KEY_MAX + 1]; struct flow_keys flow_keys; @@ -346,13 +349,23 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, }; +static void flow_destroy_filter(struct rcu_head *head) +{ + struct flow_filter *f = container_of(head, struct flow_filter, rcu); + + del_timer_sync(&f->perturb_timer); + tcf_exts_destroy(f->tp, &f->exts); + tcf_em_tree_destroy(f->tp, &f->ematches); + kfree(f); +} + static int flow_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct flow_head *head = 
tp->root; - struct flow_filter *f; + struct flow_head *head = rtnl_dereference(tp->root); + struct flow_filter *fold, *fnew; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FLOW_MAX + 1]; struct tcf_exts e; @@ -401,20 +414,42 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (err < 0) goto err1; - f = (struct flow_filter *)*arg; - if (f != NULL) { + err = -ENOBUFS; + fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); + if (!fnew) + goto err2; + + fold = (struct flow_filter *)*arg; + if (fold) { err = -EINVAL; - if (f->handle != handle && handle) + if (fold->handle != handle && handle) goto err2; - mode = f->mode; + /* Copy fold into fnew */ + fnew->handle = fold->handle; + fnew->tp = fold->tp; + + fnew->nkeys = fold->nkeys; + fnew->keymask = fold->keymask; + fnew->mode = fold->mode; + fnew->mask = fold->mask; + fnew->xor = fold->xor; + fnew->rshift = fold->rshift; + fnew->addend = fold->addend; + fnew->divisor = fold->divisor; + fnew->baseclass = fold->baseclass; + fnew->hashrnd = fold->hashrnd; + + mode = fold->mode; if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) goto err2; if (mode == FLOW_MODE_HASH) - perturb_period = f->perturb_period; + perturb_period = fold->perturb_period; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) goto err2; @@ -444,83 +479,70 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (TC_H_MIN(baseclass) == 0) baseclass = TC_H_MAKE(baseclass, 1); - err = -ENOBUFS; - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (f == NULL) - goto err2; - - f->handle = handle; - f->mask = ~0U; - tcf_exts_init(&f->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); - - get_random_bytes(&f->hashrnd, 4); - f->perturb_timer.function = flow_perturbation; - f->perturb_timer.data = (unsigned long)f; - init_timer_deferrable(&f->perturb_timer); + fnew->handle = handle; + fnew->mask = ~0U; + fnew->tp = tp; + get_random_bytes(&fnew->hashrnd, 4); + tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); } - tcf_exts_change(tp, &f->exts, &e); - tcf_em_tree_change(tp, &f->ematches, &t); + fnew->perturb_timer.function = flow_perturbation; + fnew->perturb_timer.data = (unsigned long)fnew; + init_timer_deferrable(&fnew->perturb_timer); - tcf_tree_lock(tp); + tcf_exts_change(tp, &fnew->exts, &e); + tcf_em_tree_change(tp, &fnew->ematches, &t); if (tb[TCA_FLOW_KEYS]) { - f->keymask = keymask; - f->nkeys = nkeys; + fnew->keymask = keymask; + fnew->nkeys = nkeys; } - f->mode = mode; + fnew->mode = mode; if (tb[TCA_FLOW_MASK]) - f->mask = nla_get_u32(tb[TCA_FLOW_MASK]); + fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]); if (tb[TCA_FLOW_XOR]) - f->xor = nla_get_u32(tb[TCA_FLOW_XOR]); + fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]); if (tb[TCA_FLOW_RSHIFT]) - f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]); + fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]); if (tb[TCA_FLOW_ADDEND]) - f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]); + fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]); if (tb[TCA_FLOW_DIVISOR]) - f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]); + fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]); if (baseclass) - f->baseclass = baseclass; + fnew->baseclass = baseclass; - f->perturb_period = perturb_period; - del_timer(&f->perturb_timer); + fnew->perturb_period = perturb_period; if (perturb_period) - mod_timer(&f->perturb_timer, jiffies + perturb_period); + mod_timer(&fnew->perturb_timer, jiffies + perturb_period); if (*arg == 0) - list_add_tail(&f->list, &head->filters); + list_add_tail_rcu(&fnew->list, &head->filters); + else + list_replace_rcu(&fold->list, &fnew->list); - tcf_tree_unlock(tp); + *arg = (unsigned long)fnew; - *arg = (unsigned long)f; + if (fold) + call_rcu(&fold->rcu, flow_destroy_filter); return 0; err2: tcf_em_tree_destroy(tp, &t); + kfree(fnew); err1: tcf_exts_destroy(tp, &e); return err; }
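flow_change() now follows the copy/update/publish discipline used across this classifier series: build fnew, splice it in with list_add_tail_rcu() or list_replace_rcu() while readers keep traversing, and free the old filter only via call_rcu() after a grace period; control-path reads use rtnl_dereference() under RTNL while the fast path uses rcu_dereference_bh(). A user-space sketch of the publish step, with C11 atomics standing in for rcu_assign_pointer()/rcu_dereference() and the grace-period side (call_rcu) only stubbed:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct filter {
        int handle;
        struct filter *next;
};

static _Atomic(struct filter *) chain;  /* plays the role of tp->root */

static struct filter *reader_lookup(int handle)
{
        /* rcu_dereference(): acquire load, then plain traversal */
        struct filter *f = atomic_load_explicit(&chain, memory_order_acquire);

        for (; f; f = f->next)
                if (f->handle == handle)
                        return f;
        return NULL;
}

static void writer_replace(struct filter *fold, struct filter *fnew)
{
        fnew->next = fold ? fold->next : NULL;
        /* rcu_assign_pointer(): release store publishes the built node */
        atomic_store_explicit(&chain, fnew, memory_order_release);
        /* call_rcu(fold) would free it after all readers that might still
         * see fold have finished; this sketch simply leaks it. */
}

int main(void)
{
        struct filter *f1 = calloc(1, sizeof(*f1));
        struct filter *f2 = calloc(1, sizeof(*f2));

        f1->handle = 1;
        writer_replace(NULL, f1);
        f2->handle = 1;                 /* updated filter, same handle */
        writer_replace(f1, f2);
        printf("found %p\n", (void *)reader_lookup(1));
        return 0;
}

The sketch replaces only at the head of a one-level chain; the kernel code applies the same publish/defer-free rule to full RCU lists and hash buckets.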
&head->filters); + list_add_tail_rcu(&fnew->list, &head->filters); + else + list_replace_rcu(&fold->list, &fnew->list); - tcf_tree_unlock(tp); + *arg = (unsigned long)fnew; - *arg = (unsigned long)f; + if (fold) + call_rcu(&fold->rcu, flow_destroy_filter); return 0; err2: tcf_em_tree_destroy(tp, &t); + kfree(fnew); err1: tcf_exts_destroy(tp, &e); return err; } -static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f) -{ - del_timer_sync(&f->perturb_timer); - tcf_exts_destroy(tp, &f->exts); - tcf_em_tree_destroy(tp, &f->ematches); - kfree(f); -} - static int flow_delete(struct tcf_proto *tp, unsigned long arg) { struct flow_filter *f = (struct flow_filter *)arg; - tcf_tree_lock(tp); - list_del(&f->list); - tcf_tree_unlock(tp); - flow_destroy_filter(tp, f); + list_del_rcu(&f->list); + call_rcu(&f->rcu, flow_destroy_filter); return 0; } @@ -532,28 +554,29 @@ static int flow_init(struct tcf_proto *tp) if (head == NULL) return -ENOBUFS; INIT_LIST_HEAD(&head->filters); - tp->root = head; + rcu_assign_pointer(tp->root, head); return 0; } static void flow_destroy(struct tcf_proto *tp) { - struct flow_head *head = tp->root; + struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f, *next; list_for_each_entry_safe(f, next, &head->filters, list) { - list_del(&f->list); - flow_destroy_filter(tp, f); + list_del_rcu(&f->list); + call_rcu(&f->rcu, flow_destroy_filter); } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static unsigned long flow_get(struct tcf_proto *tp, u32 handle) { - struct flow_head *head = tp->root; + struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f; - list_for_each_entry(f, &head->filters, list) + list_for_each_entry_rcu(f, &head->filters, list) if (f->handle == handle) return (unsigned long)f; return 0; @@ -626,10 +649,10 @@ nla_put_failure: static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct flow_head *head = tp->root; + struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f; - list_for_each_entry(f, &head->filters, list) { + list_for_each_entry_rcu(f, &head->filters, list) { if (arg->count < arg->skip) goto skip; if (arg->fn(tp, (unsigned long)f, arg) < 0) {
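Two details in the cls_flow conversion deserve emphasis. list_replace_rcu() takes the element being removed first and its replacement second; with the arguments the other way around, the freshly allocated filter, which is on no list, would be spliced out and the dying one left visible. And because flow filters carry a timer and extension state, teardown is deferred to an rcu_head callback rather than done inline. A reduced sketch of the replace path, with hypothetical my_* names rather than the patch's code:

	#include <linux/rculist.h>
	#include <linux/slab.h>

	struct my_filter {
		struct list_head list;
		u32 handle;
		struct rcu_head rcu;
	};

	static void my_filter_free(struct rcu_head *head)
	{
		struct my_filter *f = container_of(head, struct my_filter, rcu);

		/* a real classifier would also tear down exts/timers here */
		kfree(f);
	}

	/* writer under RTNL; readers use list_for_each_entry_rcu() */
	static void my_change(struct list_head *filters,
			      struct my_filter *fold, struct my_filter *fnew)
	{
		if (!fold)
			list_add_tail_rcu(&fnew->list, filters);
		else
			list_replace_rcu(&fold->list, &fnew->list);
		if (fold)
			call_rcu(&fold->rcu, my_filter_free);
	}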
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 861b03ccfed0..2650285620ee --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -33,17 +33,20 @@ struct fw_head { u32 mask; - struct fw_filter *ht[HTSIZE]; + struct fw_filter __rcu *ht[HTSIZE]; + struct rcu_head rcu; }; struct fw_filter { - struct fw_filter *next; + struct fw_filter __rcu *next; u32 id; struct tcf_result res; #ifdef CONFIG_NET_CLS_IND int ifindex; #endif /* CONFIG_NET_CLS_IND */ struct tcf_exts exts; + struct tcf_proto *tp; + struct rcu_head rcu; }; static u32 fw_hash(u32 handle) @@ -56,14 +59,16 @@ static u32 fw_hash(u32 handle) static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct fw_head *head = tp->root; + struct fw_head *head = rcu_dereference_bh(tp->root); struct fw_filter *f; int r; u32 id = skb->mark; if (head != NULL) { id &= head->mask; + for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f; + f = rcu_dereference_bh(f->next)) { - for (f = head->ht[fw_hash(id)]; f; f = f->next) { if (f->id == id) { *res = f->res; #ifdef CONFIG_NET_CLS_IND @@ -92,13 +97,14 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, static unsigned long fw_get(struct tcf_proto *tp, u32 handle) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f; if (head == NULL) return 0; - for (f = head->ht[fw_hash(handle)]; f; f = f->next) { + f = rtnl_dereference(head->ht[fw_hash(handle)]); + for (; f; f = rtnl_dereference(f->next)) { if (f->id == handle) return (unsigned long)f; } @@ -114,8 +120,11 @@ static int fw_init(struct tcf_proto *tp) return 0; } -static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f) +static void fw_delete_filter(struct rcu_head *head) { + struct fw_filter *f = container_of(head, struct fw_filter, rcu); + struct tcf_proto *tp = f->tp; + tcf_unbind_filter(tp, &f->res); tcf_exts_destroy(tp, &f->exts); kfree(f); @@ -123,7 +132,7 @@ static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f) static void fw_destroy(struct tcf_proto *tp) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f; int h; @@ -131,29 +140,33 @@ static void fw_destroy(struct tcf_proto *tp) return; for (h = 0; h < HTSIZE; h++) { - while ((f = head->ht[h]) != NULL) { - head->ht[h] = f->next; - fw_delete_filter(tp, f); + while ((f = rtnl_dereference(head->ht[h])) != NULL) { + RCU_INIT_POINTER(head->ht[h], + rtnl_dereference(f->next)); + call_rcu(&f->rcu, fw_delete_filter); } } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static int fw_delete(struct tcf_proto *tp, unsigned long arg) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f = (struct fw_filter *)arg; - struct fw_filter **fp; + struct fw_filter __rcu **fp; + struct fw_filter *pfp; if (head == NULL || f == NULL) goto out; - for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) { - if (*fp == f) { - tcf_tree_lock(tp); - *fp = f->next; - tcf_tree_unlock(tp); - fw_delete_filter(tp, f); + fp = &head->ht[fw_hash(f->id)]; + + for (pfp = rtnl_dereference(*fp); pfp; + fp = &pfp->next, pfp = rtnl_dereference(*fp)) { + if (pfp == f) { + RCU_INIT_POINTER(*fp, rtnl_dereference(f->next)); + call_rcu(&f->rcu, fw_delete_filter); return 0; } } @@ -171,7 +184,7 @@ static int fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct tcf_exts e; u32 mask; int err; @@ -220,7 +233,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f = (struct fw_filter *) *arg; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FW_MAX + 1]; @@ -233,10 +246,44 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; - if (f != NULL) { + if (f) { + struct fw_filter *pfp, *fnew; + struct fw_filter __rcu **fp; + if (f->id != handle && handle) return -EINVAL; - return fw_change_attrs(net, tp, f, tb, tca, base, ovr); + + fnew = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); + if (!fnew) + return -ENOBUFS; + + fnew->id = f->id; + fnew->res = f->res; +#ifdef CONFIG_NET_CLS_IND + fnew->ifindex = f->ifindex; +#endif /* CONFIG_NET_CLS_IND */ + fnew->tp = f->tp; + + tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE); + + err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr); + if (err < 0) { + kfree(fnew); + return err; + } + + fp = &head->ht[fw_hash(fnew->id)]; + for (pfp = rtnl_dereference(*fp); pfp; + fp
= &pfp->next, pfp = rtnl_dereference(*fp)) + if (pfp == f) + break; + + RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next)); + rcu_assign_pointer(*fp, fnew); + call_rcu(&f->rcu, fw_delete_filter); + + *arg = (unsigned long)fnew; + return err; } if (!handle) @@ -252,9 +299,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, return -ENOBUFS; head->mask = mask; - tcf_tree_lock(tp); - tp->root = head; - tcf_tree_unlock(tp); + rcu_assign_pointer(tp->root, head); } f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); @@ -263,15 +308,14 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE); f->id = handle; + f->tp = tp; err = fw_change_attrs(net, tp, f, tb, tca, base, ovr); if (err < 0) goto errout; - f->next = head->ht[fw_hash(handle)]; - tcf_tree_lock(tp); - head->ht[fw_hash(handle)] = f; - tcf_tree_unlock(tp); + RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]); + rcu_assign_pointer(head->ht[fw_hash(handle)], f); *arg = (unsigned long)f; return 0; @@ -283,7 +327,7 @@ errout: static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); int h; if (head == NULL) @@ -295,7 +339,8 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) for (h = 0; h < HTSIZE; h++) { struct fw_filter *f; - for (f = head->ht[h]; f; f = f->next) { + for (f = rtnl_dereference(head->ht[h]); f; + f = rtnl_dereference(f->next)) { if (arg->count < arg->skip) { arg->count++; continue; @@ -312,7 +357,7 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { - struct fw_head *head = tp->root; + struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f = (struct fw_filter *)fh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index dd9fc2523c76..ba96deacf27c 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -29,25 +29,26 @@ * are mutually exclusive. * 3. 
"to TAG from ANY" has higher priority, than "to ANY from XXX" */ - struct route4_fastmap { - struct route4_filter *filter; - u32 id; - int iif; + struct route4_filter *filter; + u32 id; + int iif; }; struct route4_head { - struct route4_fastmap fastmap[16]; - struct route4_bucket *table[256 + 1]; + struct route4_fastmap fastmap[16]; + struct route4_bucket __rcu *table[256 + 1]; + struct rcu_head rcu; }; struct route4_bucket { /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */ - struct route4_filter *ht[16 + 16 + 1]; + struct route4_filter __rcu *ht[16 + 16 + 1]; + struct rcu_head rcu; }; struct route4_filter { - struct route4_filter *next; + struct route4_filter __rcu *next; u32 id; int iif; @@ -55,6 +56,8 @@ struct route4_filter { struct tcf_exts exts; u32 handle; struct route4_bucket *bkt; + struct tcf_proto *tp; + struct rcu_head rcu; }; #define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) @@ -64,14 +67,13 @@ static inline int route4_fastmap_hash(u32 id, int iif) return id & 0xF; } +static DEFINE_SPINLOCK(fastmap_lock); static void -route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id) +route4_reset_fastmap(struct route4_head *head) { - spinlock_t *root_lock = qdisc_root_sleeping_lock(q); - - spin_lock_bh(root_lock); + spin_lock_bh(&fastmap_lock); memset(head->fastmap, 0, sizeof(head->fastmap)); - spin_unlock_bh(root_lock); + spin_unlock_bh(&fastmap_lock); } static void @@ -80,9 +82,12 @@ route4_set_fastmap(struct route4_head *head, u32 id, int iif, { int h = route4_fastmap_hash(id, iif); + /* fastmap updates must look atomic to aling id, iff, filter */ + spin_lock_bh(&fastmap_lock); head->fastmap[h].id = id; head->fastmap[h].iif = iif; head->fastmap[h].filter = f; + spin_unlock_bh(&fastmap_lock); } static inline int route4_hash_to(u32 id) @@ -123,7 +128,7 @@ static inline int route4_hash_wild(void) static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct route4_head *head = tp->root; + struct route4_head *head = rcu_dereference_bh(tp->root); struct dst_entry *dst; struct route4_bucket *b; struct route4_filter *f; @@ -141,32 +146,43 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp, iif = inet_iif(skb); h = route4_fastmap_hash(id, iif); + + spin_lock(&fastmap_lock); if (id == head->fastmap[h].id && iif == head->fastmap[h].iif && (f = head->fastmap[h].filter) != NULL) { - if (f == ROUTE4_FAILURE) + if (f == ROUTE4_FAILURE) { + spin_unlock(&fastmap_lock); goto failure; + } *res = f->res; + spin_unlock(&fastmap_lock); return 0; } + spin_unlock(&fastmap_lock); h = route4_hash_to(id); restart: - b = head->table[h]; + b = rcu_dereference_bh(head->table[h]); if (b) { - for (f = b->ht[route4_hash_from(id)]; f; f = f->next) + for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]); + f; + f = rcu_dereference_bh(f->next)) if (f->id == id) ROUTE4_APPLY_RESULT(); - for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next) + for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]); + f; + f = rcu_dereference_bh(f->next)) if (f->iif == iif) ROUTE4_APPLY_RESULT(); - for (f = b->ht[route4_hash_wild()]; f; f = f->next) + for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]); + f; + f = rcu_dereference_bh(f->next)) ROUTE4_APPLY_RESULT(); - } if (h < 256) { h = 256; @@ -213,7 +229,7 @@ static inline u32 from_hash(u32 id) static unsigned long route4_get(struct tcf_proto *tp, u32 handle) { - struct route4_head *head = tp->root; + struct route4_head *head = rtnl_dereference(tp->root); struct 
route4_bucket *b; struct route4_filter *f; unsigned int h1, h2; @@ -229,9 +245,11 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) if (h2 > 32) return 0; - b = head->table[h1]; + b = rtnl_dereference(head->table[h1]); if (b) { - for (f = b->ht[h2]; f; f = f->next) + for (f = rtnl_dereference(b->ht[h2]); + f; + f = rtnl_dereference(f->next)) if (f->handle == handle) return (unsigned long)f; } @@ -248,8 +266,11 @@ static int route4_init(struct tcf_proto *tp) } static void -route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f) +route4_delete_filter(struct rcu_head *head) { + struct route4_filter *f = container_of(head, struct route4_filter, rcu); + struct tcf_proto *tp = f->tp; + tcf_unbind_filter(tp, &f->res); tcf_exts_destroy(tp, &f->exts); kfree(f); @@ -257,7 +278,7 @@ route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f) static void route4_destroy(struct tcf_proto *tp) { - struct route4_head *head = tp->root; + struct route4_head *head = rtnl_dereference(tp->root); int h1, h2; if (head == NULL) @@ -266,28 +287,35 @@ static void route4_destroy(struct tcf_proto *tp) for (h1 = 0; h1 <= 256; h1++) { struct route4_bucket *b; - b = head->table[h1]; + b = rtnl_dereference(head->table[h1]); if (b) { for (h2 = 0; h2 <= 32; h2++) { struct route4_filter *f; - while ((f = b->ht[h2]) != NULL) { - b->ht[h2] = f->next; - route4_delete_filter(tp, f); + while ((f = rtnl_dereference(b->ht[h2])) != NULL) { + struct route4_filter *next; + + next = rtnl_dereference(f->next); + RCU_INIT_POINTER(b->ht[h2], next); + call_rcu(&f->rcu, route4_delete_filter); } } - kfree(b); + RCU_INIT_POINTER(head->table[h1], NULL); + kfree_rcu(b, rcu); } } - kfree(head); + RCU_INIT_POINTER(tp->root, NULL); + kfree_rcu(head, rcu); } static int route4_delete(struct tcf_proto *tp, unsigned long arg) { - struct route4_head *head = tp->root; - struct route4_filter **fp, *f = (struct route4_filter *)arg; - unsigned int h = 0; + struct route4_head *head = rtnl_dereference(tp->root); + struct route4_filter *f = (struct route4_filter *)arg; + struct route4_filter __rcu **fp; + struct route4_filter *nf; struct route4_bucket *b; + unsigned int h = 0; int i; if (!head || !f) @@ -296,27 +324,35 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg) h = f->handle; b = f->bkt; - for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) { - if (*fp == f) { - tcf_tree_lock(tp); - *fp = f->next; - tcf_tree_unlock(tp); + fp = &b->ht[from_hash(h >> 16)]; + for (nf = rtnl_dereference(*fp); nf; + fp = &nf->next, nf = rtnl_dereference(*fp)) { + if (nf == f) { + /* unlink it */ + RCU_INIT_POINTER(*fp, rtnl_dereference(f->next)); - route4_reset_fastmap(tp->q, head, f->id); - route4_delete_filter(tp, f); + /* Remove any fastmap lookups that might still reference + * the filter; we unlinked it above, so it cannot find + * its way back into the fastmap. + */
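The fastmap is the one route4 structure that RCU pointer ordering cannot protect on its own: a hit is only valid if the id, iif and filter fields are observed together, so both the classify path and the control path take the new global fastmap_lock around the triple. A reduced sketch of such a cache, with hypothetical my_* names:

	#include <linux/spinlock.h>
	#include <linux/string.h>

	struct my_fast_entry {
		u32 id;
		int iif;
		void *filter;
	};

	static struct my_fast_entry my_fastmap[16];
	static DEFINE_SPINLOCK(my_fastmap_lock);

	/* classify path: already in softirq, plain spin_lock() suffices */
	static void *my_fastmap_lookup(u32 id, int iif)
	{
		struct my_fast_entry *e = &my_fastmap[id & 0xF];
		void *filter = NULL;

		spin_lock(&my_fastmap_lock);
		if (e->id == id && e->iif == iif)
			filter = e->filter;	/* all three fields read as a unit */
		spin_unlock(&my_fastmap_lock);
		return filter;
	}

	/* control path: runs in process context, so disable bottom halves */
	static void my_fastmap_flush(void)
	{
		spin_lock_bh(&my_fastmap_lock);
		memset(my_fastmap, 0, sizeof(my_fastmap));
		spin_unlock_bh(&my_fastmap_lock);
	}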
+ route4_reset_fastmap(head); - /* Strip tree */ + /* Delete it */ + call_rcu(&f->rcu, route4_delete_filter); - for (i = 0; i <= 32; i++) - if (b->ht[i]) + /* Strip RTNL protected tree */ + for (i = 0; i <= 32; i++) { + struct route4_filter *rt; + + rt = rtnl_dereference(b->ht[i]); + if (rt) return 0; + } /* OK, session has no flows */ - tcf_tree_lock(tp); - head->table[to_hash(h)] = NULL; - tcf_tree_unlock(tp); + RCU_INIT_POINTER(head->table[to_hash(h)], NULL); + kfree_rcu(b, rcu); - kfree(b); return 0; } } @@ -380,26 +416,25 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, } h1 = to_hash(nhandle); - b = head->table[h1]; + b = rtnl_dereference(head->table[h1]); if (!b) { err = -ENOBUFS; b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); if (b == NULL) goto errout; - tcf_tree_lock(tp); - head->table[h1] = b; - tcf_tree_unlock(tp); + rcu_assign_pointer(head->table[h1], b); } else { unsigned int h2 = from_hash(nhandle >> 16); err = -EEXIST; - for (fp = b->ht[h2]; fp; fp = fp->next) + for (fp = rtnl_dereference(b->ht[h2]); + fp; + fp = rtnl_dereference(fp->next)) if (fp->handle == f->handle) goto errout; } - tcf_tree_lock(tp); if (tb[TCA_ROUTE4_TO]) f->id = to; @@ -410,7 +445,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, f->handle = nhandle; f->bkt = b; - tcf_tree_unlock(tp); + f->tp = tp; if (tb[TCA_ROUTE4_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]); @@ -431,14 +466,15 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct route4_head *head = tp->root; - struct route4_filter *f, *f1, **fp; + struct route4_head *head = rtnl_dereference(tp->root); + struct route4_filter __rcu **fp; + struct route4_filter *fold, *f1, *pfp, *f = NULL; struct route4_bucket *b; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_ROUTE4_MAX + 1]; unsigned int h, th; - u32 old_handle = 0; int err; + bool new = true; if (opt == NULL) return handle ?
-EINVAL : 0; @@ -447,70 +483,70 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; - f = (struct route4_filter *)*arg; - if (f) { - if (f->handle != handle && handle) + fold = (struct route4_filter *)*arg; + if (fold && handle && fold->handle != handle) return -EINVAL; - if (f->bkt) - old_handle = f->handle; - - err = route4_set_parms(net, tp, base, f, handle, head, tb, - tca[TCA_RATE], 0, ovr); - if (err < 0) - return err; - - goto reinsert; - } - err = -ENOBUFS; if (head == NULL) { head = kzalloc(sizeof(struct route4_head), GFP_KERNEL); if (head == NULL) goto errout; - - tcf_tree_lock(tp); - tp->root = head; - tcf_tree_unlock(tp); + rcu_assign_pointer(tp->root, head); } f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL); - if (f == NULL) + if (!f) goto errout; tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); + if (fold) { + f->id = fold->id; + f->iif = fold->iif; + f->res = fold->res; + f->handle = fold->handle; + + f->tp = fold->tp; + f->bkt = fold->bkt; + new = false; + } + err = route4_set_parms(net, tp, base, f, handle, head, tb, - tca[TCA_RATE], 1, ovr); + tca[TCA_RATE], new, ovr); if (err < 0) goto errout; -reinsert: h = from_hash(f->handle >> 16); - for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next) + fp = &f->bkt->ht[h]; + for (pfp = rtnl_dereference(*fp); + (f1 = rtnl_dereference(*fp)) != NULL; + fp = &f1->next) if (f->handle < f1->handle) break; - f->next = f1; - tcf_tree_lock(tp); - *fp = f; + rcu_assign_pointer(f->next, f1); + rcu_assign_pointer(*fp, f); - if (old_handle && f->handle != old_handle) { - th = to_hash(old_handle); - h = from_hash(old_handle >> 16); - b = head->table[th]; + if (fold && fold->handle && f->handle != fold->handle) { + th = to_hash(fold->handle); + h = from_hash(fold->handle >> 16); + b = rtnl_dereference(head->table[th]); if (b) { - for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) { - if (*fp == f) { + fp = &b->ht[h]; + for (pfp = rtnl_dereference(*fp); pfp; + fp = &pfp->next, pfp = rtnl_dereference(*fp)) { + if (pfp == f) { *fp = f->next; break; } } } } - tcf_tree_unlock(tp); - route4_reset_fastmap(tp->q, head, f->id); + route4_reset_fastmap(head); *arg = (unsigned long)f; + if (fold) + call_rcu(&fold->rcu, route4_delete_filter); return 0; errout: @@ -520,7 +556,7 @@ errout: static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct route4_head *head = tp->root; + struct route4_head *head = rtnl_dereference(tp->root); unsigned int h, h1; if (head == NULL) @@ -530,13 +566,15 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) return; for (h = 0; h <= 256; h++) { - struct route4_bucket *b = head->table[h]; + struct route4_bucket *b = rtnl_dereference(head->table[h]); if (b) { for (h1 = 0; h1 <= 32; h1++) { struct route4_filter *f; - for (f = b->ht[h1]; f; f = f->next) { + for (f = rtnl_dereference(b->ht[h1]); + f; + f = rtnl_dereference(f->next)) { if (arg->count < arg->skip) { arg->count++; continue; diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 1020e233a5d6..b044c208b133 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -70,31 +70,34 @@ struct rsvp_head { u32 tmap[256/32]; u32 hgenerator; u8 tgenerator; - struct rsvp_session *ht[256]; + struct rsvp_session __rcu *ht[256]; + struct rcu_head rcu; }; struct rsvp_session { - struct rsvp_session *next; - __be32 dst[RSVP_DST_LEN]; - struct tc_rsvp_gpi dpi; - u8 protocol; - u8 tunnelid; + struct rsvp_session __rcu *next; + __be32 dst[RSVP_DST_LEN]; + struct tc_rsvp_gpi 
dpi; + u8 protocol; + u8 tunnelid; /* 16 (src,sport) hash slots, and one wildcard source slot */ - struct rsvp_filter *ht[16 + 1]; + struct rsvp_filter __rcu *ht[16 + 1]; + struct rcu_head rcu; }; struct rsvp_filter { - struct rsvp_filter *next; - __be32 src[RSVP_DST_LEN]; - struct tc_rsvp_gpi spi; - u8 tunnelhdr; + struct rsvp_filter __rcu *next; + __be32 src[RSVP_DST_LEN]; + struct tc_rsvp_gpi spi; + u8 tunnelhdr; - struct tcf_result res; - struct tcf_exts exts; + struct tcf_result res; + struct tcf_exts exts; - u32 handle; - struct rsvp_session *sess; + u32 handle; + struct rsvp_session *sess; + struct rcu_head rcu; }; static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) @@ -128,7 +131,7 @@ static inline unsigned int hash_src(__be32 *src) static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; + struct rsvp_head *head = rcu_dereference_bh(tp->root); struct rsvp_session *s; struct rsvp_filter *f; unsigned int h1, h2; @@ -169,7 +172,8 @@ restart: h1 = hash_dst(dst, protocol, tunnelid); h2 = hash_src(src); - for (s = sht[h1]; s; s = s->next) { + for (s = rcu_dereference_bh(head->ht[h1]); s; + s = rcu_dereference_bh(s->next)) { if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] && protocol == s->protocol && !(s->dpi.mask & @@ -181,7 +185,8 @@ restart: #endif tunnelid == s->tunnelid) { - for (f = s->ht[h2]; f; f = f->next) { + for (f = rcu_dereference_bh(s->ht[h2]); f; + f = rcu_dereference_bh(f->next)) { if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] && !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key)) #if RSVP_DST_LEN == 4 @@ -205,7 +210,8 @@ matched: } /* And wildcard bucket... */ - for (f = s->ht[16]; f; f = f->next) { + for (f = rcu_dereference_bh(s->ht[16]); f; + f = rcu_dereference_bh(f->next)) { *res = f->res; RSVP_APPLY_RESULT(); goto matched; @@ -218,7 +224,7 @@ matched: static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) { - struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; + struct rsvp_head *head = rtnl_dereference(tp->root); struct rsvp_session *s; struct rsvp_filter *f; unsigned int h1 = handle & 0xFF; @@ -227,8 +233,10 @@ static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) if (h2 > 16) return 0; - for (s = sht[h1]; s; s = s->next) { - for (f = s->ht[h2]; f; f = f->next) { + for (s = rtnl_dereference(head->ht[h1]); s; + s = rtnl_dereference(s->next)) { + for (f = rtnl_dereference(s->ht[h2]); f; + f = rtnl_dereference(f->next)) { if (f->handle == handle) return (unsigned long)f; } @@ -246,7 +254,7 @@ static int rsvp_init(struct tcf_proto *tp) data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL); if (data) { - tp->root = data; + rcu_assign_pointer(tp->root, data); return 0; } return -ENOBUFS; @@ -257,53 +265,54 @@ rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) { tcf_unbind_filter(tp, &f->res); tcf_exts_destroy(tp, &f->exts); - kfree(f); + kfree_rcu(f, rcu); } static void rsvp_destroy(struct tcf_proto *tp) { - struct rsvp_head *data = xchg(&tp->root, NULL); - struct rsvp_session **sht; + struct rsvp_head *data = rtnl_dereference(tp->root); int h1, h2; if (data == NULL) return; - sht = data->ht; + RCU_INIT_POINTER(tp->root, NULL); for (h1 = 0; h1 < 256; h1++) { struct rsvp_session *s; - while ((s = sht[h1]) != NULL) { - sht[h1] = s->next; + while ((s = rtnl_dereference(data->ht[h1])) != NULL) { + RCU_INIT_POINTER(data->ht[h1], s->next); for (h2 = 0; h2 <= 16; h2++) { struct 
rsvp_filter *f; - while ((f = s->ht[h2]) != NULL) { - s->ht[h2] = f->next; + while ((f = rtnl_dereference(s->ht[h2])) != NULL) { + rcu_assign_pointer(s->ht[h2], f->next); rsvp_delete_filter(tp, f); } } - kfree(s); + kfree_rcu(s, rcu); } } - kfree(data); + kfree_rcu(data, rcu); } static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) { - struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg; + struct rsvp_head *head = rtnl_dereference(tp->root); + struct rsvp_filter *nfp, *f = (struct rsvp_filter *)arg; + struct rsvp_filter __rcu **fp; unsigned int h = f->handle; - struct rsvp_session **sp; - struct rsvp_session *s = f->sess; + struct rsvp_session __rcu **sp; + struct rsvp_session *nsp, *s = f->sess; int i; - for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) { - if (*fp == f) { - tcf_tree_lock(tp); - *fp = f->next; - tcf_tree_unlock(tp); + fp = &s->ht[(h >> 8) & 0xFF]; + for (nfp = rtnl_dereference(*fp); nfp; + fp = &nfp->next, nfp = rtnl_dereference(*fp)) { + if (nfp == f) { + RCU_INIT_POINTER(*fp, f->next); rsvp_delete_filter(tp, f); /* Strip tree */ @@ -313,14 +322,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) return 0; /* OK, session has no flows */ - for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF]; - *sp; sp = &(*sp)->next) { - if (*sp == s) { - tcf_tree_lock(tp); - *sp = s->next; - tcf_tree_unlock(tp); - - kfree(s); + sp = &head->ht[h & 0xFF]; + for (nsp = rtnl_dereference(*sp); nsp; + sp = &nsp->next, nsp = rtnl_dereference(*sp)) { + if (nsp == s) { + RCU_INIT_POINTER(*sp, s->next); + kfree_rcu(s, rcu); return 0; } } @@ -333,7 +340,7 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg) static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt) { - struct rsvp_head *data = tp->root; + struct rsvp_head *data = rtnl_dereference(tp->root); int i = 0xFFFF; while (i-- > 0) { @@ -361,7 +368,7 @@ static int tunnel_bts(struct rsvp_head *data) static void tunnel_recycle(struct rsvp_head *data) { - struct rsvp_session **sht = data->ht; + struct rsvp_session __rcu **sht = data->ht; u32 tmap[256/32]; int h1, h2; @@ -369,11 +376,13 @@ static void tunnel_recycle(struct rsvp_head *data) for (h1 = 0; h1 < 256; h1++) { struct rsvp_session *s; - for (s = sht[h1]; s; s = s->next) { + for (s = rtnl_dereference(sht[h1]); s; + s = rtnl_dereference(s->next)) { for (h2 = 0; h2 <= 16; h2++) { struct rsvp_filter *f; - for (f = s->ht[h2]; f; f = f->next) { + for (f = rtnl_dereference(s->ht[h2]); f; + f = rtnl_dereference(f->next)) { if (f->tunnelhdr == 0) continue; data->tgenerator = f->res.classid; @@ -417,9 +426,11 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, struct nlattr **tca, unsigned long *arg, bool ovr) { - struct rsvp_head *data = tp->root; - struct rsvp_filter *f, **fp; - struct rsvp_session *s, **sp; + struct rsvp_head *data = rtnl_dereference(tp->root); + struct rsvp_filter *f, *nfp; + struct rsvp_filter __rcu **fp; + struct rsvp_session *nsp, *s; + struct rsvp_session __rcu **sp; struct tc_rsvp_pinfo *pinfo = NULL; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_RSVP_MAX + 1]; @@ -499,7 +510,9 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, goto errout; } - for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) { + for (sp = &data->ht[h1]; + (s = rtnl_dereference(*sp)) != NULL; + sp = &s->next) { if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] && pinfo && pinfo->protocol == s->protocol && memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 && @@ -521,12 +534,16 @@ 
insert: tcf_exts_change(tp, &f->exts, &e); - for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next) - if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask) + fp = &s->ht[h2]; + for (nfp = rtnl_dereference(*fp); nfp; + fp = &nfp->next, nfp = rtnl_dereference(*fp)) { + __u32 mask = nfp->spi.mask & f->spi.mask; + + if (mask != f->spi.mask) break; - f->next = *fp; - wmb(); - *fp = f; + } + RCU_INIT_POINTER(f->next, nfp); + rcu_assign_pointer(*fp, f); *arg = (unsigned long)f; return 0; @@ -546,13 +563,14 @@ insert: s->protocol = pinfo->protocol; s->tunnelid = pinfo->tunnelid; } - for (sp = &data->ht[h1]; *sp; sp = &(*sp)->next) { - if (((*sp)->dpi.mask&s->dpi.mask) != s->dpi.mask) + sp = &data->ht[h1]; + for (nsp = rtnl_dereference(*sp); nsp; + sp = &nsp->next, nsp = rtnl_dereference(*sp)) { + if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask) break; } - s->next = *sp; - wmb(); - *sp = s; + RCU_INIT_POINTER(s->next, nsp); + rcu_assign_pointer(*sp, s); goto insert; @@ -565,7 +583,7 @@ errout2: static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) { - struct rsvp_head *head = tp->root; + struct rsvp_head *head = rtnl_dereference(tp->root); unsigned int h, h1; if (arg->stop) @@ -574,11 +592,13 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) for (h = 0; h < 256; h++) { struct rsvp_session *s; - for (s = head->ht[h]; s; s = s->next) { + for (s = rtnl_dereference(head->ht[h]); s; + s = rtnl_dereference(s->next)) { for (h1 = 0; h1 <= 16; h1++) { struct rsvp_filter *f; - for (f = s->ht[h1]; f; f = f->next) { + for (f = rtnl_dereference(s->ht[h1]); f; + f = rtnl_dereference(f->next)) { if (arg->count < arg->skip) { arg->count++; continue; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 3e9f76413b3b..5054fae33a48 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -32,19 +32,21 @@ struct tcindex_filter_result { struct tcindex_filter { u16 key; struct tcindex_filter_result result; - struct tcindex_filter *next; + struct tcindex_filter __rcu *next; + struct rcu_head rcu; }; struct tcindex_data { struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */ - struct tcindex_filter **h; /* imperfect hash; only used if !perfect; - NULL if unused */ + struct tcindex_filter __rcu **h; /* imperfect hash; */ + struct tcf_proto *tp; u16 mask; /* AND key with mask */ - int shift; /* shift ANDed key to the right */ - int hash; /* hash table size; 0 if undefined */ - int alloc_hash; /* allocated size */ - int fall_through; /* 0: only classify if explicit match */ + u32 shift; /* shift ANDed key to the right */ + u32 hash; /* hash table size; 0 if undefined */ + u32 alloc_hash; /* allocated size */ + u32 fall_through; /* 0: only classify if explicit match */ + struct rcu_head rcu; }; static inline int @@ -56,13 +58,18 @@ tcindex_filter_is_set(struct tcindex_filter_result *r) static struct tcindex_filter_result * tcindex_lookup(struct tcindex_data *p, u16 key) { - struct tcindex_filter *f; + if (p->perfect) { + struct tcindex_filter_result *f = p->perfect + key; + + return tcindex_filter_is_set(f) ? f : NULL; + } else if (p->h) { + struct tcindex_filter __rcu **fp; + struct tcindex_filter *f; - if (p->perfect) - return tcindex_filter_is_set(p->perfect + key) ? 
- p->perfect + key : NULL; - else if (p->h) { - for (f = p->h[key % p->hash]; f; f = f->next) + fp = &p->h[key % p->hash]; + for (f = rcu_dereference_bh_rtnl(*fp); + f; + fp = &f->next, f = rcu_dereference_bh_rtnl(*fp)) if (f->key == key) return &f->result; } @@ -74,7 +81,7 @@ tcindex_lookup(struct tcindex_data *p, u16 key) static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rcu_dereference_bh(tp->root); struct tcindex_filter_result *f; int key = (skb->tc_index & p->mask) >> p->shift; @@ -99,7 +106,7 @@ static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp, static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r; pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle); @@ -129,49 +136,59 @@ static int tcindex_init(struct tcf_proto *tp) p->hash = DEFAULT_HASH_SIZE; p->fall_through = 1; - tp->root = p; + rcu_assign_pointer(tp->root, p); return 0; } - static int -__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock) +tcindex_delete(struct tcf_proto *tp, unsigned long arg) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg; + struct tcindex_filter __rcu **walk; struct tcindex_filter *f = NULL; - pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f); + pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p\n", tp, arg, p); if (p->perfect) { if (!r->res.class) return -ENOENT; } else { int i; - struct tcindex_filter **walk = NULL; - for (i = 0; i < p->hash; i++) - for (walk = p->h+i; *walk; walk = &(*walk)->next) - if (&(*walk)->result == r) + for (i = 0; i < p->hash; i++) { + walk = p->h + i; + for (f = rtnl_dereference(*walk); f; + walk = &f->next, f = rtnl_dereference(*walk)) { + if (&f->result == r) goto found; + } + } return -ENOENT; found: - f = *walk; - if (lock) - tcf_tree_lock(tp); - *walk = f->next; - if (lock) - tcf_tree_unlock(tp); + rcu_assign_pointer(*walk, rtnl_dereference(f->next)); } tcf_unbind_filter(tp, &r->res); tcf_exts_destroy(tp, &r->exts); - kfree(f); + if (f) + kfree_rcu(f, rcu); return 0; } -static int tcindex_delete(struct tcf_proto *tp, unsigned long arg) +static int tcindex_destroy_element(struct tcf_proto *tp, + unsigned long arg, + struct tcf_walker *walker) +{ + return tcindex_delete(tp, arg); +} + +static void __tcindex_destroy(struct rcu_head *head) { - return __tcindex_delete(tp, arg, 1); + struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); + + kfree(p->perfect); + kfree(p->h); + kfree(p); } static inline int @@ -194,6 +211,14 @@ static void tcindex_filter_result_init(struct tcindex_filter_result *r) tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); } +static void __tcindex_partial_destroy(struct rcu_head *head) +{ + struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); + + kfree(p->perfect); + kfree(p); +} + static int tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, u32 handle, struct tcindex_data *p, @@ -203,7 +228,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, int err, balloc = 0; struct tcindex_filter_result new_filter_result, *old_r = r; struct tcindex_filter_result cr; - struct tcindex_data cp; + struct tcindex_data *cp, *oldp; struct 
tcindex_filter *f = NULL; /* make gcc behave */ struct tcf_exts e; @@ -212,84 +237,118 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, if (err < 0) return err; - memcpy(&cp, p, sizeof(cp)); - tcindex_filter_result_init(&new_filter_result); + /* tcindex_data attributes must look atomic to classifier/lookup so + * allocate new tcindex data and RCU assign it onto root. Keeping + * perfect hash and hash pointers from old data. + */ + cp = kzalloc(sizeof(*cp), GFP_KERNEL); + if (!cp) { + err = -ENOMEM; + goto errout; + } + cp->mask = p->mask; + cp->shift = p->shift; + cp->hash = p->hash; + cp->alloc_hash = p->alloc_hash; + cp->fall_through = p->fall_through; + cp->tp = tp; + + if (p->perfect) { + cp->perfect = kmemdup(p->perfect, + sizeof(*r) * cp->hash, GFP_KERNEL); + if (!cp->perfect) { + err = -ENOMEM; + goto errout; + } + balloc = 1; + } + cp->h = p->h; + + tcindex_filter_result_init(&new_filter_result); tcindex_filter_result_init(&cr); if (old_r) cr.res = r->res; if (tb[TCA_TCINDEX_HASH]) - cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); + cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); if (tb[TCA_TCINDEX_MASK]) - cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); + cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); if (tb[TCA_TCINDEX_SHIFT]) - cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); + cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); err = -EBUSY; + /* Hash already allocated, make sure that we still meet the * requirements for the allocated hash. */ - if (cp.perfect) { - if (!valid_perfect_hash(&cp) || - cp.hash > cp.alloc_hash) - goto errout; - } else if (cp.h && cp.hash != cp.alloc_hash) - goto errout; + if (cp->perfect) { + if (!valid_perfect_hash(cp) || - cp.hash > cp.alloc_hash) + cp->hash > cp->alloc_hash) + goto errout_alloc; + } else if (cp->h && cp->hash != cp->alloc_hash) { + goto errout_alloc; + } err = -EINVAL; if (tb[TCA_TCINDEX_FALL_THROUGH]) - cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); + cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); - if (!cp.hash) { + if (!cp->hash) { /* Hash not specified, use perfect hash if the upper limit * of the hashing index is below the threshold. */ - if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD) - cp.hash = (cp.mask >> cp.shift) + 1; + if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) + cp->hash = (cp->mask >> cp->shift) + 1; else - cp.hash = DEFAULT_HASH_SIZE; + cp->hash = DEFAULT_HASH_SIZE; } - if (!cp.perfect && !cp.h) - cp.alloc_hash = cp.hash; + if (!cp->perfect && !cp->h) + cp->alloc_hash = cp->hash; /* Note: this could be as restrictive as if (handle & ~(mask >> shift)) * but then, we'd fail handles that may become valid after some future * mask change. While this is extremely unlikely to ever matter, * the check below is safer (and also more backwards-compatible).
*/ - if (cp.perfect || valid_perfect_hash(&cp)) - if (handle >= cp.alloc_hash) - goto errout; + if (cp->perfect || valid_perfect_hash(cp)) + if (handle >= cp->alloc_hash) + goto errout_alloc; err = -ENOMEM; - if (!cp.perfect && !cp.h) { - if (valid_perfect_hash(&cp)) { + if (!cp->perfect && !cp->h) { + if (valid_perfect_hash(cp)) { int i; - cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL); - if (!cp.perfect) - goto errout; - for (i = 0; i < cp.hash; i++) - tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT, + cp->perfect = kcalloc(cp->hash, sizeof(*r), GFP_KERNEL); + if (!cp->perfect) + goto errout_alloc; + for (i = 0; i < cp->hash; i++) + tcf_exts_init(&cp->perfect[i].exts, + TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); balloc = 1; } else { - cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL); - if (!cp.h) - goto errout; + struct tcindex_filter __rcu **hash; + + hash = kcalloc(cp->hash, + sizeof(struct tcindex_filter *), + GFP_KERNEL); + + if (!hash) + goto errout_alloc; + + cp->h = hash; balloc = 2; } } - if (cp.perfect) - r = cp.perfect + handle; + if (cp->perfect) + r = cp->perfect + handle; else - r = tcindex_lookup(&cp, handle) ? : &new_filter_result; + r = tcindex_lookup(cp, handle) ? : &new_filter_result; if (r == &new_filter_result) { f = kzalloc(sizeof(*f), GFP_KERNEL); @@ -307,33 +366,41 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, else tcf_exts_change(tp, &cr.exts, &e); - tcf_tree_lock(tp); if (old_r && old_r != r) tcindex_filter_result_init(old_r); - memcpy(p, &cp, sizeof(cp)); + oldp = p; r->res = cr.res; + rcu_assign_pointer(tp->root, cp); if (r == &new_filter_result) { - struct tcindex_filter **fp; + struct tcindex_filter *nfp; + struct tcindex_filter __rcu **fp; f->key = handle; f->result = new_filter_result; f->next = NULL; - for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next) - /* nothing */; - *fp = f; + + fp = cp->h + (handle % cp->hash); + for (nfp = rtnl_dereference(*fp); + nfp; + fp = &nfp->next, nfp = rtnl_dereference(*fp)) + ; /* nothing */ + + rcu_assign_pointer(*fp, f); } - tcf_tree_unlock(tp); + if (oldp) + call_rcu(&oldp->rcu, __tcindex_partial_destroy); return 0; errout_alloc: if (balloc == 1) - kfree(cp.perfect); + kfree(cp->perfect); else if (balloc == 2) - kfree(cp.h); + kfree(cp->h); errout: + kfree(cp); tcf_exts_destroy(tp, &e); return err; } @@ -345,7 +412,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb, { struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_TCINDEX_MAX + 1]; - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg; int err; @@ -364,10 +431,9 @@ tcindex_change(struct net *net, struct sk_buff *in_skb, tca[TCA_RATE], ovr); } - static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter *f, *next; int i; @@ -390,8 +456,8 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) if (!p->h) return; for (i = 0; i < p->hash; i++) { - for (f = p->h[i]; f; f = next) { - next = f->next; + for (f = rtnl_dereference(p->h[i]); f; f = next) { + next = rtnl_dereference(f->next); if (walker->count >= walker->skip) { if (walker->fn(tp, (unsigned long) &f->result, walker) < 0) { @@ -404,17 +470,9 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) } } - -static int tcindex_destroy_element(struct tcf_proto *tp, - unsigned 
long arg, struct tcf_walker *walker) -{ - return __tcindex_delete(tp, arg, 0); -} - - static void tcindex_destroy(struct tcf_proto *tp) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcf_walker walker; pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); @@ -422,17 +480,16 @@ static void tcindex_destroy(struct tcf_proto *tp) walker.skip = 0; walker.fn = tcindex_destroy_element; tcindex_walk(tp, &walker); - kfree(p->perfect); - kfree(p->h); - kfree(p); - tp->root = NULL; + + RCU_INIT_POINTER(tp->root, NULL); + call_rcu(&p->rcu, __tcindex_destroy); } static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { - struct tcindex_data *p = tp->root; + struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh; unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; @@ -455,15 +512,18 @@ static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, nla_nest_end(skb, nest); } else { if (p->perfect) { - t->tcm_handle = r-p->perfect; + t->tcm_handle = r - p->perfect; } else { struct tcindex_filter *f; + struct tcindex_filter __rcu **fp; int i; t->tcm_handle = 0; for (i = 0; !t->tcm_handle && i < p->hash; i++) { - for (f = p->h[i]; !t->tcm_handle && f; - f = f->next) { + fp = &p->h[i]; + for (f = rtnl_dereference(*fp); + !t->tcm_handle && f; + fp = &f->next, f = rtnl_dereference(*fp)) { if (&f->result == r) t->tcm_handle = f->key; }
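cls_tcindex shows why a whole-structure copy is sometimes needed: mask, shift and hash are interdependent, and a reader that combined a new mask with an old shift could index out of range. The conversion above therefore duplicates tcindex_data, edits the private copy, and swaps it in with one pointer store. In miniature, with hypothetical names and the logic reduced to two fields:

	#include <linux/rcupdate.h>
	#include <linux/rtnetlink.h>
	#include <linux/slab.h>

	struct my_params {
		u16 mask;
		u32 shift;
		struct rcu_head rcu;
	};

	static struct my_params __rcu *my_root;	/* assumed set at init time */

	/* reader: mask and shift always come from the same snapshot */
	static u32 my_key(u16 tc_index)
	{
		struct my_params *p = rcu_dereference_bh(my_root);

		return (tc_index & p->mask) >> p->shift;
	}

	/* writer under RTNL: copy, modify, publish, defer the free */
	static int my_set_parms(u16 mask, u32 shift)
	{
		struct my_params *old = rtnl_dereference(my_root);
		struct my_params *new = kmemdup(old, sizeof(*old), GFP_KERNEL);

		if (!new)
			return -ENOMEM;
		new->mask = mask;
		new->shift = shift;
		rcu_assign_pointer(my_root, new);
		kfree_rcu(old, rcu);
		return 0;
	}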
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 70c0be8d0121..ef97a646ee94 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -36,6 +36,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> +#include <linux/percpu.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/bitmap.h> @@ -44,40 +45,49 @@ #include <net/pkt_cls.h> struct tc_u_knode { - struct tc_u_knode *next; + struct tc_u_knode __rcu *next; u32 handle; - struct tc_u_hnode *ht_up; + struct tc_u_hnode __rcu *ht_up; struct tcf_exts exts; #ifdef CONFIG_NET_CLS_IND int ifindex; #endif u8 fshift; struct tcf_result res; - struct tc_u_hnode *ht_down; + struct tc_u_hnode __rcu *ht_down; #ifdef CONFIG_CLS_U32_PERF - struct tc_u32_pcnt *pf; + struct tc_u32_pcnt __percpu *pf; #endif #ifdef CONFIG_CLS_U32_MARK - struct tc_u32_mark mark; + u32 val; + u32 mask; + u32 __percpu *pcpu_success; #endif + struct tcf_proto *tp; + struct rcu_head rcu; + /* The 'sel' field MUST be the last field in the structure so that + * the tc_u32_keys can be allocated at the end of it. + */ struct tc_u32_sel sel; }; struct tc_u_hnode { - struct tc_u_hnode *next; + struct tc_u_hnode __rcu *next; u32 handle; u32 prio; struct tc_u_common *tp_c; int refcnt; unsigned int divisor; - struct tc_u_knode *ht[1]; + struct tc_u_knode __rcu *ht[1]; + struct rcu_head rcu; }; struct tc_u_common { - struct tc_u_hnode *hlist; + struct tc_u_hnode __rcu *hlist; struct Qdisc *q; int refcnt; u32 hgenerator; + struct rcu_head rcu; }; static inline unsigned int u32_hash_fold(__be32 key, @@ -96,7 +106,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct unsigned int off; } stack[TC_U32_MAXDEPTH]; - struct tc_u_hnode *ht = tp->root; + struct tc_u_hnode *ht = rcu_dereference_bh(tp->root); unsigned int off = skb_network_offset(skb); struct tc_u_knode *n; int sdepth = 0; @@ -108,23 +118,23 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct int i, r; next_ht: - n = ht->ht[sel]; + n = rcu_dereference_bh(ht->ht[sel]); next_knode: if (n) { struct tc_u32_key *key = n->sel.keys; #ifdef CONFIG_CLS_U32_PERF - n->pf->rcnt += 1; + __this_cpu_inc(n->pf->rcnt); j = 0; #endif #ifdef CONFIG_CLS_U32_MARK - if ((skb->mark & n->mark.mask) != n->mark.val) { - n = n->next; + if ((skb->mark & n->mask) != n->val) { + n = rcu_dereference_bh(n->next); goto next_knode; } else { - n->mark.success++; + __this_cpu_inc(*n->pcpu_success); } #endif @@ -139,37 +149,39 @@ next_knode: if (!data) goto out; if ((*data ^ key->val) & key->mask) { - n = n->next; + n = rcu_dereference_bh(n->next); goto next_knode; } #ifdef CONFIG_CLS_U32_PERF - n->pf->kcnts[j] += 1; + __this_cpu_inc(n->pf->kcnts[j]); j++; #endif } - if (n->ht_down == NULL) { + + ht = rcu_dereference_bh(n->ht_down); + if (!ht) { check_terminal: if (n->sel.flags & TC_U32_TERMINAL) { *res = n->res; #ifdef CONFIG_NET_CLS_IND if (!tcf_match_indev(skb, n->ifindex)) { - n = n->next; + n = rcu_dereference_bh(n->next); goto next_knode; } #endif #ifdef CONFIG_CLS_U32_PERF - n->pf->rhit += 1; + __this_cpu_inc(n->pf->rhit); #endif r = tcf_exts_exec(skb, &n->exts, res); if (r < 0) { - n = n->next; + n = rcu_dereference_bh(n->next); goto next_knode; } return r; } - n = n->next; + n = rcu_dereference_bh(n->next); goto next_knode; } @@ -180,7 +192,7 @@ check_terminal: stack[sdepth].off = off; sdepth++; - ht = n->ht_down; + ht = rcu_dereference_bh(n->ht_down); sel = 0; if (ht->divisor) { __be32 *data, hdata; @@ -222,7 +234,7 @@ check_terminal: /* POP */ if (sdepth--) { n = stack[sdepth].knode; - ht = n->ht_up; + ht = rcu_dereference_bh(n->ht_up); off = stack[sdepth].off; goto check_terminal; } @@ -239,7 +251,9 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle) { struct tc_u_hnode *ht; - for (ht = tp_c->hlist; ht; ht = ht->next) + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) if (ht->handle == handle) break; @@ -256,7 +270,9 @@ u32_lookup_key(struct tc_u_hnode *ht, u32 handle) if (sel > ht->divisor) goto out; - for (n = ht->ht[sel]; n; n = n->next) + for (n = rtnl_dereference(ht->ht[sel]); + n; + n = rtnl_dereference(n->next)) if (n->handle == handle) break; out: @@ -270,7 +286,7 @@ static unsigned long u32_get(struct tcf_proto *tp, u32 handle) struct tc_u_common *tp_c = tp->data; if (TC_U32_HTID(handle) == TC_U32_ROOT) - ht = tp->root; + ht = rtnl_dereference(tp->root); else ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle)); @@ -291,6 +307,9 @@ static u32 gen_new_htid(struct tc_u_common *tp_c) { int i = 0x800; + /* hgenerator is only used under the rtnl lock, so it is safe to + * increment it without read-copy-update semantics. + */ do { if (++tp_c->hgenerator == 0x7FF) tp_c->hgenerator = 1; @@ -326,41 +345,78 @@ static int u32_init(struct tcf_proto *tp) } tp_c->refcnt++; - root_ht->next = tp_c->hlist; - tp_c->hlist = root_ht; + RCU_INIT_POINTER(root_ht->next, tp_c->hlist); + rcu_assign_pointer(tp_c->hlist, root_ht); root_ht->tp_c = tp_c; - tp->root = root_ht; + rcu_assign_pointer(tp->root, root_ht); tp->data = tp_c; return 0; } -static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n) +static int u32_destroy_key(struct tcf_proto *tp, + struct tc_u_knode *n, + bool free_pf) { tcf_unbind_filter(tp, &n->res); tcf_exts_destroy(tp, &n->exts); if (n->ht_down) n->ht_down->refcnt--; #ifdef CONFIG_CLS_U32_PERF - kfree(n->pf); + if (free_pf) + free_percpu(n->pf); +#endif +#ifdef CONFIG_CLS_U32_MARK + if (free_pf) + free_percpu(n->pcpu_success); #endif kfree(n); return 0; } +/* u32_delete_key_rcu should be called when freeing a copied + * version of a tc_u_knode obtained from u32_init_knode(). When + * copies are obtained from u32_init_knode() the statistics are + * shared between the old and new copies to allow readers to + * continue to update the statistics during the copy. To support + * this the u32_delete_key_rcu variant does not free the percpu + * statistics. + */ +static void u32_delete_key_rcu(struct rcu_head *rcu) +{ + struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); + + u32_destroy_key(key->tp, key, false); +} + +/* u32_delete_key_freepf_rcu is the rcu callback variant + * that frees the entire structure including the statistics + * percpu variables. Only use this if the key is not a copy + * returned by u32_init_knode(). See u32_delete_key_rcu() + * for the variant that should be used with keys returned from + * u32_init_knode() + */ +static void u32_delete_key_freepf_rcu(struct rcu_head *rcu) +{ + struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); + + u32_destroy_key(key->tp, key, true); +} + static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) { - struct tc_u_knode **kp; - struct tc_u_hnode *ht = key->ht_up; + struct tc_u_knode __rcu **kp; + struct tc_u_knode *pkp; + struct tc_u_hnode *ht = rtnl_dereference(key->ht_up); if (ht) { - for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) { - if (*kp == key) { - tcf_tree_lock(tp); - *kp = key->next; - tcf_tree_unlock(tp); + kp = &ht->ht[TC_U32_HASH(key->handle)]; + for (pkp = rtnl_dereference(*kp); pkp; + kp = &pkp->next, pkp = rtnl_dereference(*kp)) { + if (pkp == key) { + RCU_INIT_POINTER(*kp, key->next); - u32_destroy_key(tp, key); + call_rcu(&key->rcu, u32_delete_key_freepf_rcu); return 0; } } @@ -369,16 +425,16 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) return 0; } -static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) +static void u32_clear_hnode(struct tc_u_hnode *ht) { struct tc_u_knode *n; unsigned int h; for (h = 0; h <= ht->divisor; h++) { - while ((n = ht->ht[h]) != NULL) { - ht->ht[h] = n->next; - - u32_destroy_key(tp, n); + while ((n = rtnl_dereference(ht->ht[h])) != NULL) { + RCU_INIT_POINTER(ht->ht[h], + rtnl_dereference(n->next)); + call_rcu(&n->rcu, u32_delete_key_freepf_rcu); } } } @@ -386,28 +442,31 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht) { struct tc_u_common *tp_c = tp->data; - struct tc_u_hnode **hn; + struct tc_u_hnode __rcu **hn; + struct tc_u_hnode *phn;
WARN_ON(ht->refcnt); - u32_clear_hnode(tp, ht); + u32_clear_hnode(ht); - for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) { - if (*hn == ht) { - *hn = ht->next; - kfree(ht); + hn = &tp_c->hlist; + for (phn = rtnl_dereference(*hn); + phn; + hn = &phn->next, phn = rtnl_dereference(*hn)) { + if (phn == ht) { + RCU_INIT_POINTER(*hn, ht->next); + kfree_rcu(ht, rcu); return 0; } } - WARN_ON(1); return -ENOENT; } static void u32_destroy(struct tcf_proto *tp) { struct tc_u_common *tp_c = tp->data; - struct tc_u_hnode *root_ht = tp->root; + struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); WARN_ON(root_ht == NULL); @@ -419,17 +478,16 @@ static void u32_destroy(struct tcf_proto *tp) tp->q->u32_node = NULL; - for (ht = tp_c->hlist; ht; ht = ht->next) { + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) { ht->refcnt--; - u32_clear_hnode(tp, ht); + u32_clear_hnode(ht); } - while ((ht = tp_c->hlist) != NULL) { - tp_c->hlist = ht->next; - - WARN_ON(ht->refcnt != 0); - - kfree(ht); + while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) { + RCU_INIT_POINTER(tp_c->hlist, ht->next); + kfree_rcu(ht, rcu); } kfree(tp_c); @@ -441,6 +499,7 @@ static void u32_destroy(struct tcf_proto *tp) static int u32_delete(struct tcf_proto *tp, unsigned long arg) { struct tc_u_hnode *ht = (struct tc_u_hnode *)arg; + struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); if (ht == NULL) return 0; @@ -448,7 +507,7 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg) if (TC_U32_KEY(ht->handle)) return u32_delete_key(tp, (struct tc_u_knode *)ht); - if (tp->root == ht) + if (root_ht == ht) return -EINVAL; if (ht->refcnt == 1) { @@ -471,7 +530,9 @@ static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) if (!bitmap) return handle | 0xFFF; - for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) + for (n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]); + n; + n = rtnl_dereference(n->next)) set_bit(TC_U32_NODE(n->handle), bitmap); i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800); @@ -521,10 +582,8 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, ht_down->refcnt++; } - tcf_tree_lock(tp); - ht_old = n->ht_down; - n->ht_down = ht_down; - tcf_tree_unlock(tp); + ht_old = rtnl_dereference(n->ht_down); + rcu_assign_pointer(n->ht_down, ht_down); if (ht_old) ht_old->refcnt--; @@ -551,6 +610,82 @@ errout: return err; } +static void u32_replace_knode(struct tcf_proto *tp, + struct tc_u_common *tp_c, + struct tc_u_knode *n) +{ + struct tc_u_knode __rcu **ins; + struct tc_u_knode *pins; + struct tc_u_hnode *ht; + + if (TC_U32_HTID(n->handle) == TC_U32_ROOT) + ht = rtnl_dereference(tp->root); + else + ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle)); + + ins = &ht->ht[TC_U32_HASH(n->handle)]; + + /* The node must always exist for it to be replaced; if this is not + * the case then something went very wrong elsewhere. + */
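The walk that follows is the standard recipe for editing a singly linked __rcu chain under RTNL: advance a pointer-to-pointer until it names the slot holding the victim, prepare the replacement's next pointer with RCU_INIT_POINTER() while the new node is still private, then make the whole swap visible with a single rcu_assign_pointer() store. The same shape appears in fw_delete() and u32_delete_key() above. A generic sketch, assuming the victim is known to be on the chain (hypothetical my_node):

	#include <linux/rcupdate.h>
	#include <linux/rtnetlink.h>

	struct my_node {
		struct my_node __rcu *next;
		u32 handle;
	};

	/* writer, serialized by RTNL: swap old for new in the chain at *slot */
	static void my_chain_replace(struct my_node __rcu **slot,
				     struct my_node *old, struct my_node *new)
	{
		struct my_node *n;

		for (n = rtnl_dereference(*slot); n != old;
		     slot = &n->next, n = rtnl_dereference(*slot))
			;	/* advance until *slot references the victim */

		/* order matters: fill in new->next before publishing new */
		RCU_INIT_POINTER(new->next, rtnl_dereference(old->next));
		rcu_assign_pointer(*slot, new);
	}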
+ for (pins = rtnl_dereference(*ins); ; + ins = &pins->next, pins = rtnl_dereference(*ins)) + if (pins->handle == n->handle) + break; + + RCU_INIT_POINTER(n->next, pins->next); + rcu_assign_pointer(*ins, n); +} + +static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp, + struct tc_u_knode *n) +{ + struct tc_u_knode *new; + struct tc_u32_sel *s = &n->sel; + + new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), + GFP_KERNEL); + + if (!new) + return NULL; + + RCU_INIT_POINTER(new->next, n->next); + new->handle = n->handle; + RCU_INIT_POINTER(new->ht_up, n->ht_up); + +#ifdef CONFIG_NET_CLS_IND + new->ifindex = n->ifindex; +#endif + new->fshift = n->fshift; + new->res = n->res; + RCU_INIT_POINTER(new->ht_down, n->ht_down); + + /* bump reference count as long as we hold pointer to structure */ + if (new->ht_down) + new->ht_down->refcnt++; + +#ifdef CONFIG_CLS_U32_PERF + /* Statistics may be incremented by readers during update + * so we must keep them intact. When the node is later destroyed + * a special destroy call must be made to not free the pf memory. + */ + new->pf = n->pf; +#endif + +#ifdef CONFIG_CLS_U32_MARK + new->val = n->val; + new->mask = n->mask; + /* Similarly success statistics must be moved as pointers */ + new->pcpu_success = n->pcpu_success; +#endif + new->tp = tp; + memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); + + tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE); + + return new; +} +
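Because u32_init_knode() hands the same pf and pcpu_success buffers to the old and new node, per-cpu hit counters keep ticking via __this_cpu_inc() on the datapath while the replacement is prepared; only a node that still owns its statistics is destroyed with free_pf == true, and the counters are folded together only on the control path, as u32_dump() does below. A sketch of that split, with a hypothetical my_stats type:

	#include <linux/percpu.h>

	struct my_stats {
		u64 hits;
	};

	/* datapath: lockless increment of this CPU's slot; safe in softirq */
	static void my_stats_hit(struct my_stats __percpu *stats)
	{
		__this_cpu_inc(stats->hits);
	}

	/* control path (RTNL): fold every CPU's slot into one total */
	static u64 my_stats_fold(struct my_stats __percpu *stats)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			sum += per_cpu_ptr(stats, cpu)->hits;
		return sum;
	}

	/* allocation pairs with free_percpu() once no node references it */
	static struct my_stats __percpu *my_stats_alloc(void)
	{
		return alloc_percpu(struct my_stats);
	}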
static int u32_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, @@ -564,6 +699,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, struct nlattr *tb[TCA_U32_MAX + 1]; u32 htid; int err; +#ifdef CONFIG_CLS_U32_PERF + size_t size; +#endif if (opt == NULL) return handle ? -EINVAL : 0; err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy); if (err < 0) return err; n = (struct tc_u_knode *)*arg; if (n) { + struct tc_u_knode *new; + if (TC_U32_KEY(n->handle) == 0) return -EINVAL; - return u32_set_parms(net, tp, base, n->ht_up, n, tb, - tca[TCA_RATE], ovr); + new = u32_init_knode(tp, n); + if (!new) + return -ENOMEM; + + err = u32_set_parms(net, tp, base, + rtnl_dereference(n->ht_up), new, tb, + tca[TCA_RATE], ovr); + + if (err) { + u32_destroy_key(tp, new, false); + return err; + } + + u32_replace_knode(tp, tp_c, new); + call_rcu(&n->rcu, u32_delete_key_rcu); + return 0; } if (tb[TCA_U32_DIVISOR]) { @@ -601,8 +755,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, ht->divisor = divisor; ht->handle = handle; ht->prio = tp->prio; - ht->next = tp_c->hlist; - tp_c->hlist = ht; + RCU_INIT_POINTER(ht->next, tp_c->hlist); + rcu_assign_pointer(tp_c->hlist, ht); *arg = (unsigned long)ht; return 0; } @@ -610,7 +764,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, if (tb[TCA_U32_HASH]) { htid = nla_get_u32(tb[TCA_U32_HASH]); if (TC_U32_HTID(htid) == TC_U32_ROOT) { - ht = tp->root; + ht = rtnl_dereference(tp->root); htid = ht->handle; } else { ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid)); @@ -618,7 +772,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; } } else { - ht = tp->root; + ht = rtnl_dereference(tp->root); htid = ht->handle; } @@ -642,46 +796,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -ENOBUFS; #ifdef CONFIG_CLS_U32_PERF - n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL); - if (n->pf == NULL) { + size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64); + n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt)); + if (!n->pf) { kfree(n); return -ENOBUFS; } #endif memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key)); - n->ht_up = ht; + RCU_INIT_POINTER(n->ht_up, ht); n->handle = handle; n->fshift = s->hmask ?
ffs(ntohl(s->hmask)) - 1 : 0; tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE); + n->tp = tp; #ifdef CONFIG_CLS_U32_MARK + n->pcpu_success = alloc_percpu(u32); + if (!n->pcpu_success) { + err = -ENOMEM; + goto errout; + } + if (tb[TCA_U32_MARK]) { struct tc_u32_mark *mark; mark = nla_data(tb[TCA_U32_MARK]); - memcpy(&n->mark, mark, sizeof(struct tc_u32_mark)); - n->mark.success = 0; + n->val = mark->val; + n->mask = mark->mask; } #endif err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr); if (err == 0) { - struct tc_u_knode **ins; - for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next) - if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle)) + struct tc_u_knode __rcu **ins; + struct tc_u_knode *pins; + + ins = &ht->ht[TC_U32_HASH(handle)]; + for (pins = rtnl_dereference(*ins); pins; + ins = &pins->next, pins = rtnl_dereference(*ins)) + if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle)) break; - n->next = *ins; - tcf_tree_lock(tp); - *ins = n; - tcf_tree_unlock(tp); + RCU_INIT_POINTER(n->next, pins); + rcu_assign_pointer(*ins, n); *arg = (unsigned long)n; return 0; } + +#ifdef CONFIG_CLS_U32_MARK + free_percpu(n->pcpu_success); +errout: +#endif + #ifdef CONFIG_CLS_U32_PERF - kfree(n->pf); + free_percpu(n->pf); #endif kfree(n); return err; @@ -697,7 +867,9 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) if (arg->stop) return; - for (ht = tp_c->hlist; ht; ht = ht->next) { + for (ht = rtnl_dereference(tp_c->hlist); + ht; + ht = rtnl_dereference(ht->next)) { if (ht->prio != tp->prio) continue; if (arg->count >= arg->skip) { @@ -708,7 +880,9 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) } arg->count++; for (h = 0; h <= ht->divisor; h++) { - for (n = ht->ht[h]; n; n = n->next) { + for (n = rtnl_dereference(ht->ht[h]); + n; + n = rtnl_dereference(n->next)) { if (arg->count < arg->skip) { arg->count++; continue; @@ -727,6 +901,7 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, struct sk_buff *skb, struct tcmsg *t) { struct tc_u_knode *n = (struct tc_u_knode *)fh; + struct tc_u_hnode *ht_up, *ht_down; struct nlattr *nest; if (n == NULL) @@ -745,11 +920,18 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor)) goto nla_put_failure; } else { +#ifdef CONFIG_CLS_U32_PERF + struct tc_u32_pcnt *gpf; + int cpu; +#endif + if (nla_put(skb, TCA_U32_SEL, sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), &n->sel)) goto nla_put_failure; - if (n->ht_up) { + + ht_up = rtnl_dereference(n->ht_up); + if (ht_up) { u32 htid = n->handle & 0xFFFFF000; if (nla_put_u32(skb, TCA_U32_HASH, htid)) goto nla_put_failure; @@ -757,14 +939,28 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, if (n->res.classid && nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid)) goto nla_put_failure; - if (n->ht_down && - nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle)) + + ht_down = rtnl_dereference(n->ht_down); + if (ht_down && + nla_put_u32(skb, TCA_U32_LINK, ht_down->handle)) goto nla_put_failure; #ifdef CONFIG_CLS_U32_MARK - if ((n->mark.val || n->mark.mask) && - nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark)) - goto nla_put_failure; + if ((n->val || n->mask)) { + struct tc_u32_mark mark = {.val = n->val, + .mask = n->mask, + .success = 0}; + int cpum; + + for_each_possible_cpu(cpum) { + __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum); + + mark.success += cnt; + } + + if (nla_put(skb, TCA_U32_MARK, sizeof(mark), 
&mark)) + goto nla_put_failure; + } #endif if (tcf_exts_dump(skb, &n->exts) < 0) @@ -779,10 +975,29 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, } #endif #ifdef CONFIG_CLS_U32_PERF + gpf = kzalloc(sizeof(struct tc_u32_pcnt) + + n->sel.nkeys * sizeof(u64), + GFP_KERNEL); + if (!gpf) + goto nla_put_failure; + + for_each_possible_cpu(cpu) { + int i; + struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu); + + gpf->rcnt += pf->rcnt; + gpf->rhit += pf->rhit; + for (i = 0; i < n->sel.nkeys; i++) + gpf->kcnts[i] += pf->kcnts[i]; + } + if (nla_put(skb, TCA_U32_PCNT, sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), - n->pf)) + gpf)) { + kfree(gpf); goto nla_put_failure; + } + kfree(gpf); #endif } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 58bed7599db7..ca6248345937 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1781,7 +1781,7 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp, __be16 protocol = skb->protocol; int err; - for (; tp; tp = tp->next) { + for (; tp; tp = rcu_dereference_bh(tp->next)) { if (tp->protocol != protocol && tp->protocol != htons(ETH_P_ALL)) continue; @@ -1833,15 +1833,15 @@ void tcf_destroy(struct tcf_proto *tp) { tp->ops->destroy(tp); module_put(tp->ops->owner); - kfree(tp); + kfree_rcu(tp, rcu); } -void tcf_destroy_chain(struct tcf_proto **fl) +void tcf_destroy_chain(struct tcf_proto __rcu **fl) { struct tcf_proto *tp; - while ((tp = *fl) != NULL) { - *fl = tp->next; + while ((tp = rtnl_dereference(*fl)) != NULL) { + RCU_INIT_POINTER(*fl, tp->next); tcf_destroy(tp); } } diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 8449b337f9e3..c398f9c3dbdd 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -41,7 +41,7 @@ struct atm_flow_data { struct Qdisc *q; /* FIFO, TBF, etc. 
*/ - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */ void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); /* chaining */ @@ -273,7 +273,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, error = -ENOBUFS; goto err_out; } - flow->filter_list = NULL; + RCU_INIT_POINTER(flow->filter_list, NULL); flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); if (!flow->q) flow->q = &noop_qdisc; @@ -311,7 +311,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg) pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); if (list_empty(&flow->list)) return -EINVAL; - if (flow->filter_list || flow == &p->link) + if (rcu_access_pointer(flow->filter_list) || flow == &p->link) return -EBUSY; /* * Reference count must be 2: one for "keepalive" (set at class @@ -345,7 +345,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker) } } -static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct atm_qdisc_data *p = qdisc_priv(sch); struct atm_flow_data *flow = (struct atm_flow_data *)cl; @@ -369,11 +370,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) flow = NULL; if (TC_H_MAJ(skb->priority) != sch->handle || !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) { + struct tcf_proto *fl; + list_for_each_entry(flow, &p->flows, list) { - if (flow->filter_list) { - result = tc_classify_compat(skb, - flow->filter_list, - &res); + fl = rcu_dereference_bh(flow->filter_list); + if (fl) { + result = tc_classify_compat(skb, fl, &res); if (result < 0) continue; flow = (struct atm_flow_data *)res.class; @@ -544,7 +546,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) if (!p->link.q) p->link.q = &noop_qdisc; pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q); - p->link.filter_list = NULL; + RCU_INIT_POINTER(p->link.filter_list, NULL); p->link.vcc = NULL; p->link.sock = NULL; p->link.classid = sch->handle; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 762a04bb8f6d..a3244a800501 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -133,7 +133,7 @@ struct cbq_class { struct gnet_stats_rate_est64 rate_est; struct tc_cbq_xstats xstats; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; int refcnt; int filters; @@ -221,6 +221,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) struct cbq_class **defmap; struct cbq_class *cl = NULL; u32 prio = skb->priority; + struct tcf_proto *fl; struct tcf_result res; /* @@ -235,11 +236,12 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) int result = 0; defmap = head->defaults; + fl = rcu_dereference_bh(head->filter_list); /* * Step 2+n. Apply classifier. 
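The u32 dump path above shows the cost model of the new per-CPU counters: the fast path increments its local copy locklessly, and only the slow dump path sums across CPUs. A minimal sketch of that aggregation idiom, using a hypothetical example_pcnt in place of the real struct tc_u32_pcnt:

    #include <linux/percpu.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Hypothetical reduced counter block; the real code uses tc_u32_pcnt. */
    struct example_pcnt {
            u64 rcnt;       /* times the rule was consulted */
            u64 rhit;       /* times the rule matched */
    };

    /* Fold the lockless per-CPU counters into one total for a dump. */
    static void example_sum_pcnt(struct example_pcnt __percpu *pf,
                                 struct example_pcnt *total)
    {
            int cpu;

            memset(total, 0, sizeof(*total));
            for_each_possible_cpu(cpu) {
                    const struct example_pcnt *p = per_cpu_ptr(pf, cpu);

                    total->rcnt += p->rcnt;
                    total->rhit += p->rhit;
            }
    }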
*/ - if (!head->filter_list || - (result = tc_classify_compat(skb, head->filter_list, &res)) < 0) + result = tc_classify_compat(skb, fl, &res); + if (!fl || result < 0) goto fallback; cl = (void *)res.class; @@ -1954,7 +1956,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg) return 0; } -static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg) +static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch, + unsigned long arg) { struct cbq_sched_data *q = qdisc_priv(sch); struct cbq_class *cl = (struct cbq_class *)arg; diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index fb666d1e4de3..8abc2625c3a1 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -57,7 +57,7 @@ struct choke_sched_data { /* Variables */ struct red_vars vars; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct { u32 prob_drop; /* Early probability drops */ u32 prob_mark; /* Early probability marks */ @@ -203,9 +203,11 @@ static bool choke_classify(struct sk_buff *skb, { struct choke_sched_data *q = qdisc_priv(sch); struct tcf_result res; + struct tcf_proto *fl; int result; - result = tc_classify(skb, q->filter_list, &res); + fl = rcu_dereference_bh(q->filter_list); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -259,7 +261,7 @@ static bool choke_match_random(const struct choke_sched_data *q, return false; oskb = choke_peek_random(q, pidx); - if (q->filter_list) + if (rcu_access_pointer(q->filter_list)) return choke_get_classid(nskb) == choke_get_classid(oskb); return choke_match_flow(oskb, nskb); @@ -267,11 +269,11 @@ static bool choke_match_random(const struct choke_sched_data *q, static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) { + int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; struct choke_sched_data *q = qdisc_priv(sch); const struct red_parms *p = &q->parms; - int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - if (q->filter_list) { + if (rcu_access_pointer(q->filter_list)) { /* If using external classifiers, get result and record it. 
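The cbq and choke hunks here also illustrate the two read-side idioms used throughout this series: rcu_dereference_bh() when the chain will actually be walked (classify runs in softirq context with BH disabled), and rcu_access_pointer() when only the pointer's presence matters. A sketch, with example_sched standing in for the various per-qdisc private structs:

    #include <linux/rcupdate.h>
    #include <net/pkt_sched.h>
    #include <net/sch_generic.h>

    /* Stand-in for choke_sched_data, cbq_class and friends. */
    struct example_sched {
            struct tcf_proto __rcu *filter_list;
    };

    static int example_classify(struct sk_buff *skb, struct example_sched *q,
                                struct tcf_result *res)
    {
            /* Full dereference: the caller runs with BH disabled. */
            struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);

            if (!fl)
                    return -1;      /* no classifier attached */
            return tc_classify(skb, fl, res);
    }

    static bool example_has_classifier(struct example_sched *q)
    {
            /* Presence test only: the pointer value is never dereferenced. */
            return rcu_access_pointer(q->filter_list) != NULL;
    }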
*/ if (!choke_classify(skb, sch, &ret)) goto other_drop; /* Packet was eaten by filter */ @@ -564,7 +566,8 @@ static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent, return 0; } -static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **choke_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct choke_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 7bbbfe112192..d8b5ccfd248a 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -35,7 +35,7 @@ struct drr_class { struct drr_sched { struct list_head active; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct Qdisc_class_hash clhash; }; @@ -184,7 +184,8 @@ static void drr_put_class(struct Qdisc *sch, unsigned long arg) drr_destroy_class(sch, cl); } -static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch, + unsigned long cl) { struct drr_sched *q = qdisc_priv(sch); @@ -319,6 +320,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; struct tcf_result res; + struct tcf_proto *fl; int result; if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { @@ -328,7 +330,8 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, q->filter_list, &res); + fl = rcu_dereference_bh(q->filter_list); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 49d6ef338b55..485e456c8139 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -37,7 +37,7 @@ struct dsmark_qdisc_data { struct Qdisc *q; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; u8 *mask; /* "owns" the array */ u8 *value; u16 indices; @@ -186,8 +186,8 @@ ignore: } } -static inline struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch, - unsigned long cl) +static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct dsmark_qdisc_data *p = qdisc_priv(sch); return &p->filter_list; @@ -229,7 +229,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch) skb->tc_index = TC_H_MIN(skb->priority); else { struct tcf_result res; - int result = tc_classify(skb, p->filter_list, &res); + struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); + int result = tc_classify(skb, fl, &res); pr_debug("result %d class 0x%04x\n", result, res.classid); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index ba32c2b005d0..e12f997e1b4c 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -416,7 +416,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now) static struct sk_buff *fq_dequeue(struct Qdisc *sch) { struct fq_sched_data *q = qdisc_priv(sch); - u64 now = ktime_to_ns(ktime_get()); + u64 now = ktime_get_ns(); struct fq_flow_head *head; struct sk_buff *skb; struct fq_flow *f; @@ -787,7 +787,7 @@ nla_put_failure: static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct fq_sched_data *q = qdisc_priv(sch); - u64 now = ktime_to_ns(ktime_get()); + u64 now = ktime_get_ns(); struct tc_fq_qd_stats st = { .gc_flows = q->stat_gc_flows, .highprio_packets = q->stat_internal_packets, diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 
063b726bf1f8..105cf5557630 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -52,7 +52,7 @@ struct fq_codel_flow { }; /* please try to keep this structure <= 64 bytes */ struct fq_codel_sched_data { - struct tcf_proto *filter_list; /* optional external classifier */ + struct tcf_proto __rcu *filter_list; /* optional external classifier */ struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ u32 *backlogs; /* backlog table [flows_cnt] */ u32 flows_cnt; /* number of flows */ @@ -77,13 +77,15 @@ static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, hash = jhash_3words((__force u32)keys.dst, (__force u32)keys.src ^ keys.ip_proto, (__force u32)keys.ports, q->perturbation); - return ((u64)hash * q->flows_cnt) >> 32; + + return reciprocal_scale(hash, q->flows_cnt); } static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) { struct fq_codel_sched_data *q = qdisc_priv(sch); + struct tcf_proto *filter; struct tcf_result res; int result; @@ -92,11 +94,12 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, TC_H_MIN(skb->priority) <= q->flows_cnt) return TC_H_MIN(skb->priority); - if (!q->filter_list) + filter = rcu_dereference(q->filter_list); + if (!filter) return fq_codel_hash(q, skb) + 1; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, q->filter_list, &res); + result = tc_classify(skb, filter, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -495,7 +498,8 @@ static void fq_codel_put(struct Qdisc *q, unsigned long cl) { } -static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct fq_codel_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index fc04fe93c2da..11b28f651ad1 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -63,15 +63,18 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q) if (unlikely(skb)) { /* check the reason of requeuing without tx lock first */ - txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb)); + txq = skb_get_tx_queue(txq->dev, skb); if (!netif_xmit_frozen_or_stopped(txq)) { q->gso_skb = NULL; q->q.qlen--; } else skb = NULL; } else { - if (!(q->flags & TCQ_F_ONETXQUEUE) || !netif_xmit_frozen_or_stopped(txq)) + if (!(q->flags & TCQ_F_ONETXQUEUE) || !netif_xmit_frozen_or_stopped(txq)) { skb = q->dequeue(q); + if (skb) + skb = validate_xmit_skb(skb, qdisc_dev(q)); + } } return skb; @@ -90,7 +93,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, * detect it by checking xmit owner and drop the packet when * deadloop is detected. Return OK to try the next skb. */ - kfree_skb(skb); + kfree_skb_list(skb); net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n", dev_queue->dev->name); ret = qdisc_qlen(q); @@ -107,9 +110,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb, } /* - * Transmit one skb, and handle the return status as required. Holding the - * __QDISC___STATE_RUNNING bit guarantees that only one CPU can execute this - * function. + * Transmit possibly several skbs, and handle the return status as + * required. Holding the __QDISC___STATE_RUNNING bit guarantees that + * only one CPU can execute this function. * * Returns to the caller: * 0 - queue is empty or throttled. 
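dequeue_skb() above now runs validate_xmit_skb() (GSO segmentation, checksum offload fallback, VLAN insertion) at dequeue time, so a single dequeue may hand back a list of segments chained through skb->next; that is why the surrounding error paths switch from kfree_skb() to kfree_skb_list(). A sketch of the resulting shape, assuming the two-argument validate_xmit_skb() this tree declares:

    #include <linux/netdevice.h>
    #include <net/sch_generic.h>

    static struct sk_buff *example_dequeue(struct Qdisc *q)
    {
            struct sk_buff *skb = q->dequeue(q);

            /* May return a list of segments linked through skb->next. */
            if (skb)
                    skb = validate_xmit_skb(skb, qdisc_dev(q));
            return skb;
    }

    static void example_drop(struct sk_buff *skb)
    {
            kfree_skb_list(skb);    /* frees skb and every segment after it */
    }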
@@ -126,7 +129,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, HARD_TX_LOCK(dev, txq, smp_processor_id()); if (!netif_xmit_frozen_or_stopped(txq)) - ret = dev_hard_start_xmit(skb, dev, txq); + skb = dev_hard_start_xmit(skb, dev, txq, &ret); HARD_TX_UNLOCK(dev, txq); @@ -183,10 +186,12 @@ static inline int qdisc_restart(struct Qdisc *q) skb = dequeue_skb(q); if (unlikely(!skb)) return 0; + WARN_ON_ONCE(skb_dst_is_noref(skb)); + root_lock = qdisc_lock(q); dev = qdisc_dev(q); - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + txq = skb_get_tx_queue(dev, skb); return sch_direct_xmit(skb, q, dev, txq, root_lock); } @@ -518,7 +523,7 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) struct pfifo_fast_priv *priv = qdisc_priv(qdisc); for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) - skb_queue_head_init(band2list(priv, prio)); + __skb_queue_head_init(band2list(priv, prio)); /* Can by-pass the queue discipline */ qdisc->flags |= TCQ_F_CAN_BYPASS; @@ -616,7 +621,7 @@ void qdisc_reset(struct Qdisc *qdisc) ops->reset(qdisc); if (qdisc->gso_skb) { - kfree_skb(qdisc->gso_skb); + kfree_skb_list(qdisc->gso_skb); qdisc->gso_skb = NULL; qdisc->q.qlen = 0; } @@ -652,7 +657,7 @@ void qdisc_destroy(struct Qdisc *qdisc) module_put(ops->owner); dev_put(qdisc_dev(qdisc)); - kfree_skb(qdisc->gso_skb); + kfree_skb_list(qdisc->gso_skb); /* * gen_estimator est_timer() might access qdisc->q.lock, * wait a RCU grace period before freeing qdisc. @@ -778,7 +783,7 @@ static void dev_deactivate_queue(struct net_device *dev, struct Qdisc *qdisc_default = _qdisc_default; struct Qdisc *qdisc; - qdisc = dev_queue->qdisc; + qdisc = rtnl_dereference(dev_queue->qdisc); if (qdisc) { spin_lock_bh(qdisc_lock(qdisc)); @@ -871,7 +876,7 @@ static void dev_init_scheduler_queue(struct net_device *dev, { struct Qdisc *qdisc = _qdisc; - dev_queue->qdisc = qdisc; + rcu_assign_pointer(dev_queue->qdisc, qdisc); dev_queue->qdisc_sleeping = qdisc; } diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index ec8aeaac1dd7..04b0de4c68b5 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -116,7 +116,7 @@ struct hfsc_class { struct gnet_stats_queue qstats; struct gnet_stats_rate_est64 rate_est; unsigned int level; /* class level in hierarchy */ - struct tcf_proto *filter_list; /* filter list */ + struct tcf_proto __rcu *filter_list; /* filter list */ unsigned int filter_cnt; /* filter count */ struct hfsc_sched *sched; /* scheduler data */ @@ -1161,7 +1161,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; head = &q->root; - tcf = q->root.filter_list; + tcf = rcu_dereference_bh(q->root.filter_list); while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -1185,7 +1185,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return cl; /* hit leaf class */ /* apply inner filter chain */ - tcf = cl->filter_list; + tcf = rcu_dereference_bh(cl->filter_list); head = cl; } @@ -1285,7 +1285,7 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg) cl->filter_cnt--; } -static struct tcf_proto ** +static struct tcf_proto __rcu ** hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg) { struct hfsc_sched *q = qdisc_priv(sch); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 9f949abcacef..14408f262143 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -103,7 +103,7 @@ struct htb_class { u32 prio; /* these two are used only by leaves... 
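On the writer side of these conversions, filter chains are mutated only under the RTNL lock, which is why the hunks pair rtnl_dereference() for traversal with RCU_INIT_POINTER()/rcu_assign_pointer() for publication. A minimal sketch of that updater idiom; example_insert_head is illustrative, not a function from the patch:

    #include <linux/rtnetlink.h>
    #include <net/sch_generic.h>

    /* Publish a new filter at the head of an RCU-protected chain. */
    static void example_insert_head(struct tcf_proto __rcu **chain,
                                    struct tcf_proto *tp_new)
    {
            ASSERT_RTNL();  /* writers are serialized by the RTNL lock */

            /* Not yet visible to readers, so no barrier needed here... */
            RCU_INIT_POINTER(tp_new->next, rtnl_dereference(*chain));
            /* ...but publication must order the initialisation above. */
            rcu_assign_pointer(*chain, tp_new);
    }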
*/ int quantum; /* but stored for parent-to-leaf return */ - struct tcf_proto *filter_list; /* class attached filters */ + struct tcf_proto __rcu *filter_list; /* class attached filters */ int filter_cnt; int refcnt; /* usage count of this class */ @@ -153,7 +153,7 @@ struct htb_sched { int rate2quantum; /* quant = rate / rate2quantum */ /* filters for qdisc itself */ - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; #define HTB_WARN_TOOMANYEVENTS 0x1 unsigned int warned; /* only one warning */ @@ -223,9 +223,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, if (cl->level == 0) return cl; /* Start with inner filter chain if a non-leaf class is selected */ - tcf = cl->filter_list; + tcf = rcu_dereference_bh(cl->filter_list); } else { - tcf = q->filter_list; + tcf = rcu_dereference_bh(q->filter_list); } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; @@ -251,7 +251,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, return cl; /* we hit leaf; return it */ /* we have got inner class; apply inner filter chain */ - tcf = cl->filter_list; + tcf = rcu_dereference_bh(cl->filter_list); } /* classification failed; try to use default class */ cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); @@ -895,7 +895,7 @@ ok: if (!sch->q.qlen) goto fin; - q->now = ktime_to_ns(ktime_get()); + q->now = ktime_get_ns(); start_at = jiffies; next_event = q->now + 5LLU * NSEC_PER_SEC; @@ -1044,7 +1044,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt) qdisc_watchdog_init(&q->watchdog, sch); INIT_WORK(&q->work, htb_work_func); - skb_queue_head_init(&q->direct_queue); + __skb_queue_head_init(&q->direct_queue); if (tb[TCA_HTB_DIRECT_QLEN]) q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); @@ -1225,7 +1225,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, parent->un.leaf.q = new_q ? new_q : &noop_qdisc; parent->tokens = parent->buffer; parent->ctokens = parent->cbuffer; - parent->t_c = ktime_to_ns(ktime_get()); + parent->t_c = ktime_get_ns(); parent->cmode = HTB_CAN_SEND; } @@ -1455,7 +1455,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, cl->tokens = PSCHED_TICKS2NS(hopt->buffer); cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ - cl->t_c = ktime_to_ns(ktime_get()); + cl->t_c = ktime_get_ns(); cl->cmode = HTB_CAN_SEND; /* attach to the hash list and parent's family */ @@ -1519,11 +1519,12 @@ failure: return err; } -static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg) +static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch, + unsigned long arg) { struct htb_sched *q = qdisc_priv(sch); struct htb_class *cl = (struct htb_class *)arg; - struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list; + struct tcf_proto __rcu **fl = cl ? 
&cl->filter_list : &q->filter_list; return fl; } diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 62871c14e1f9..b351125f3849 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -17,7 +17,7 @@ struct ingress_qdisc_data { - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; }; /* ------------------------- Class/flow operations ------------------------- */ @@ -46,7 +46,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) { } -static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct ingress_qdisc_data *p = qdisc_priv(sch); @@ -59,9 +60,10 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct ingress_qdisc_data *p = qdisc_priv(sch); struct tcf_result res; + struct tcf_proto *fl = rcu_dereference_bh(p->filter_list); int result; - result = tc_classify(skb, p->filter_list, &res); + result = tc_classify(skb, fl, &res); qdisc_bstats_update(sch, skb); switch (result) { diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 6749e2f540d0..37e7d25d21f1 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -231,7 +231,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb) memset(&sch->qstats, 0, sizeof(sch->qstats)); for (i = 0; i < dev->num_tx_queues; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc); spin_lock_bh(qdisc_lock(qdisc)); sch->q.qlen += qdisc->q.qlen; sch->bstats.bytes += qdisc->bstats.bytes; @@ -340,7 +340,9 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, spin_unlock_bh(d->lock); for (i = tc.offset; i < tc.offset + tc.count; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc; + struct netdev_queue *q = netdev_get_tx_queue(dev, i); + + qdisc = rtnl_dereference(q->qdisc); spin_lock_bh(qdisc_lock(qdisc)); bstats.bytes += qdisc->bstats.bytes; bstats.packets += qdisc->bstats.packets; diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index afb050a735fa..c0466c1840f3 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -31,7 +31,7 @@ struct multiq_sched_data { u16 bands; u16 max_bands; u16 curband; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct Qdisc **queues; }; @@ -42,10 +42,11 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) struct multiq_sched_data *q = qdisc_priv(sch); u32 band; struct tcf_result res; + struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); int err; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - err = tc_classify(skb, q->filter_list, &res); + err = tc_classify(skb, fl, &res); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: @@ -388,7 +389,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) } } -static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct multiq_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 79359b69ad8d..03ef99e52a5c 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -24,7 +24,7 @@ struct prio_sched_data { int bands; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; u8 prio2band[TC_PRIO_MAX+1]; struct Qdisc *queues[TCQ_PRIO_BANDS]; }; @@ -36,11 +36,13 @@ prio_classify(struct sk_buff *skb, 
struct Qdisc *sch, int *qerr) struct prio_sched_data *q = qdisc_priv(sch); u32 band = skb->priority; struct tcf_result res; + struct tcf_proto *fl; int err; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (TC_H_MAJ(skb->priority) != sch->handle) { - err = tc_classify(skb, q->filter_list, &res); + fl = rcu_dereference_bh(q->filter_list); + err = tc_classify(skb, fl, &res); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: @@ -50,7 +52,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return NULL; } #endif - if (!q->filter_list || err < 0) { + if (!fl || err < 0) { if (TC_H_MAJ(band)) band = 0; return q->queues[q->prio2band[band & TC_PRIO_MAX]]; @@ -351,7 +353,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg) } } -static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct prio_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 8056fb4e618a..602ea01a4ddd 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -181,7 +181,7 @@ struct qfq_group { }; struct qfq_sched { - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; struct Qdisc_class_hash clhash; u64 oldV, V; /* Precise virtual times. */ @@ -576,7 +576,8 @@ static void qfq_put_class(struct Qdisc *sch, unsigned long arg) qfq_destroy_class(sch, cl); } -static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch, + unsigned long cl) { struct qfq_sched *q = qdisc_priv(sch); @@ -704,6 +705,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl; struct tcf_result res; + struct tcf_proto *fl; int result; if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) { @@ -714,7 +716,8 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, q->filter_list, &res); + fl = rcu_dereference_bh(q->filter_list); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 9b0f7093d970..1562fb2b3f46 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -55,7 +55,7 @@ struct sfb_bins { struct sfb_sched_data { struct Qdisc *qdisc; - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; unsigned long rehash_interval; unsigned long warmup_time; /* double buffering warmup time in jiffies */ u32 max; @@ -253,13 +253,13 @@ static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q) return false; } -static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q, +static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, int *qerr, u32 *salt) { struct tcf_result res; int result; - result = tc_classify(skb, q->filter_list, &res); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -281,6 +281,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) struct sfb_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; + struct tcf_proto *fl; int i; u32 p_min = ~0; u32 minqlen = ~0; @@ -306,9 +307,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) } } - if (q->filter_list) { + fl = rcu_dereference_bh(q->filter_list); + if (fl) { /* If using 
external classifiers, get result and record it. */ - if (!sfb_classify(skb, q, &ret, &salt)) + if (!sfb_classify(skb, fl, &ret, &salt)) goto other_drop; keys.src = salt; keys.dst = 0; @@ -660,7 +662,8 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker) } } -static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct sfb_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 1af2f73906d0..80c36bd54abc 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -125,7 +125,7 @@ struct sfq_sched_data { u8 cur_depth; /* depth of longest slot */ u8 flags; unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ - struct tcf_proto *filter_list; + struct tcf_proto __rcu *filter_list; sfq_index *ht; /* Hash table ('divisor' slots) */ struct sfq_slot *slots; /* Flows table ('maxflows' entries) */ @@ -187,6 +187,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, { struct sfq_sched_data *q = qdisc_priv(sch); struct tcf_result res; + struct tcf_proto *fl; int result; if (TC_H_MAJ(skb->priority) == sch->handle && @@ -194,13 +195,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, TC_H_MIN(skb->priority) <= q->divisor) return TC_H_MIN(skb->priority); - if (!q->filter_list) { + fl = rcu_dereference_bh(q->filter_list); + if (!fl) { skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys); return sfq_hash(q, skb) + 1; } *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; - result = tc_classify(skb, q->filter_list, &res); + result = tc_classify(skb, fl, &res); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { @@ -310,11 +312,6 @@ static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb) slot->skblist_prev = skb; } -#define slot_queue_walk(slot, skb) \ - for (skb = slot->skblist_next; \ - skb != (struct sk_buff *)slot; \ - skb = skb->next) - static unsigned int sfq_drop(struct Qdisc *sch) { struct sfq_sched_data *q = qdisc_priv(sch); @@ -841,7 +838,8 @@ static void sfq_put(struct Qdisc *q, unsigned long cl) { } -static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) +static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch, + unsigned long cl) { struct sfq_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 18ff63433709..0c39b754083b 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -239,7 +239,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) s64 ptoks = 0; unsigned int len = qdisc_pkt_len(skb); - now = ktime_to_ns(ktime_get()); + now = ktime_get_ns(); toks = min_t(s64, now - q->t_c, q->buffer); if (tbf_peak_present(q)) { @@ -292,7 +292,7 @@ static void tbf_reset(struct Qdisc *sch) qdisc_reset(q->qdisc); sch->q.qlen = 0; - q->t_c = ktime_to_ns(ktime_get()); + q->t_c = ktime_get_ns(); q->tokens = q->buffer; q->ptokens = q->mtu; qdisc_watchdog_cancel(&q->watchdog); @@ -431,7 +431,7 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt) if (opt == NULL) return -EINVAL; - q->t_c = ktime_to_ns(ktime_get()); + q->t_c = ktime_get_ns(); qdisc_watchdog_init(&q->watchdog, sch); q->qdisc = &noop_qdisc; diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index bd33793b527e..5cd291bd00e4 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -96,11 +96,14 @@ teql_dequeue(struct Qdisc *sch) struct teql_sched_data *dat = qdisc_priv(sch); struct netdev_queue *dat_queue; 
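The tbf hunks above, like the fq and htb ones earlier, replace ktime_to_ns(ktime_get()) with ktime_get_ns(), which returns the monotonic clock as a u64 directly instead of round-tripping through a ktime_t. A trivial sketch of the equivalence:

    #include <linux/ktime.h>
    #include <linux/timekeeping.h>

    static u64 example_now_ns(void)
    {
            /* New helper: one call, no intermediate ktime_t. */
            return ktime_get_ns();
    }

    static u64 example_now_ns_old(void)
    {
            /* Old spelling: same value, more ceremony. */
            return ktime_to_ns(ktime_get());
    }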
struct sk_buff *skb; + struct Qdisc *q; skb = __skb_dequeue(&dat->q); dat_queue = netdev_get_tx_queue(dat->m->dev, 0); + q = rcu_dereference_bh(dat_queue->qdisc); + if (skb == NULL) { - struct net_device *m = qdisc_dev(dat_queue->qdisc); + struct net_device *m = qdisc_dev(q); if (m) { dat->m->slaves = sch; netif_wake_queue(m); @@ -108,7 +111,7 @@ teql_dequeue(struct Qdisc *sch) } else { qdisc_bstats_update(sch, skb); } - sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen; + sch->q.qlen = dat->q.qlen + q->q.qlen; return skb; } @@ -157,9 +160,9 @@ teql_destroy(struct Qdisc *sch) txq = netdev_get_tx_queue(master->dev, 0); master->slaves = NULL; - root_lock = qdisc_root_sleeping_lock(txq->qdisc); + root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); spin_lock_bh(root_lock); - qdisc_reset(txq->qdisc); + qdisc_reset(rtnl_dereference(txq->qdisc)); spin_unlock_bh(root_lock); } } @@ -266,7 +269,7 @@ static inline int teql_resolve(struct sk_buff *skb, struct dst_entry *dst = skb_dst(skb); int res; - if (txq->qdisc == &noop_qdisc) + if (rcu_access_pointer(txq->qdisc) == &noop_qdisc) return -ENODEV; if (!dev->header_ops || !dst) @@ -301,7 +304,6 @@ restart: do { struct net_device *slave = qdisc_dev(q); struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0); - const struct net_device_ops *slave_ops = slave->netdev_ops; if (slave_txq->qdisc_sleeping != q) continue; @@ -317,8 +319,8 @@ restart: unsigned int length = qdisc_pkt_len(skb); if (!netif_xmit_frozen_or_stopped(slave_txq) && - slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) { - txq_trans_update(slave_txq); + netdev_start_xmit(skb, slave, slave_txq, false) == + NETDEV_TX_OK) { __netif_tx_unlock(slave_txq); master->slaves = NEXT_SLAVE(q); netif_wake_queue(dev); diff --git a/net/sctp/input.c b/net/sctp/input.c index c1b991294516..b6493b3f11a9 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -133,9 +133,13 @@ int sctp_rcv(struct sk_buff *skb) __skb_pull(skb, skb_transport_offset(skb)); if (skb->len < sizeof(struct sctphdr)) goto discard_it; - if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) && - sctp_rcv_checksum(net, skb) < 0) + + skb->csum_valid = 0; /* Previous value not applicable */ + if (skb_csum_unnecessary(skb)) + __skb_decr_checksum_unnecessary(skb); + else if (!sctp_checksum_disable && sctp_rcv_checksum(net, skb) < 0) goto discard_it; + skb->csum_valid = 1; skb_pull(skb, sizeof(struct sctphdr)); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 6240834f4b95..9d2c6c9facb6 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -366,7 +366,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) && ret != RTN_LOCAL && !sp->inet.freebind && - !sysctl_ip_nonlocal_bind) + !net->ipv4.sysctl_ip_nonlocal_bind) return 0; if (ipv6_only_sock(sctp_opt2sk(sp))) diff --git a/net/socket.c b/net/socket.c index 4cdbc107606f..ffd9cb46902b 100644 --- a/net/socket.c +++ b/net/socket.c @@ -610,7 +610,7 @@ void sock_release(struct socket *sock) } EXPORT_SYMBOL(sock_release); -void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) +void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) { u8 flags = *tx_flags; @@ -626,12 +626,9 @@ void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK) flags |= SKBTX_ACK_TSTAMP; - if (sock_flag(sk, SOCK_WIFI_STATUS)) - flags |= SKBTX_WIFI_STATUS; - *tx_flags = flags; } -EXPORT_SYMBOL(sock_tx_timestamp); 
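The rename of sock_tx_timestamp() to __sock_tx_timestamp() below, together with the removal of the SOCK_WIFI_STATUS handling from the out-of-line body, implies a new static inline wrapper (presumably in net/sock.h, which this hunk does not show). A hedged guess at its shape; the exact body is an assumption, not part of this diff:

    /* Assumed wrapper, not shown in this diff; the body is speculative. */
    static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
    {
            *tx_flags = 0;
            if (unlikely(sk->sk_tsflags))
                    __sock_tx_timestamp(sk, tx_flags);      /* slow path */
            if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
                    *tx_flags |= SKBTX_WIFI_STATUS;
    }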
+EXPORT_SYMBOL(__sock_tx_timestamp); static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) diff --git a/net/tipc/Makefile b/net/tipc/Makefile index a080c66d819a..b8a13caad59a 100644 --- a/net/tipc/Makefile +++ b/net/tipc/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_TIPC) := tipc.o tipc-y += addr.o bcast.o bearer.o config.o \ core.o link.o discover.o msg.o \ name_distr.o subscr.o name_table.o net.o \ - netlink.o node.o node_subscr.o port.o ref.o \ + netlink.o node.o node_subscr.o \ socket.o log.o eth_media.o server.o tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index dd13bfa09333..b2bbe69b2554 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -37,7 +37,6 @@ #include "core.h" #include "link.h" -#include "port.h" #include "socket.h" #include "msg.h" #include "bcast.h" @@ -300,8 +299,8 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) tipc_link_push_queue(bcl); bclink_set_last_sent(); } - if (unlikely(released && !list_empty(&bcl->waiting_ports))) - tipc_link_wakeup_ports(bcl, 0); + if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks))) + bclink->node.action_flags |= TIPC_WAKEUP_USERS; exit: tipc_bclink_unlock(); } @@ -840,9 +839,10 @@ int tipc_bclink_init(void) sprintf(bcbearer->media.name, "tipc-broadcast"); spin_lock_init(&bclink->lock); - INIT_LIST_HEAD(&bcl->waiting_ports); + __skb_queue_head_init(&bcl->waiting_sks); bcl->next_out_no = 1; spin_lock_init(&bclink->node.lock); + __skb_queue_head_init(&bclink->node.waiting_sks); bcl->owner = &bclink->node; bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); diff --git a/net/tipc/config.c b/net/tipc/config.c index 2b42403ad33a..876f4c6a2631 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -35,7 +35,7 @@ */ #include "core.h" -#include "port.h" +#include "socket.h" #include "name_table.h" #include "config.h" #include "server.h" @@ -266,7 +266,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area rep_tlv_buf = tipc_media_get_names(); break; case TIPC_CMD_SHOW_PORTS: - rep_tlv_buf = tipc_port_get_ports(); + rep_tlv_buf = tipc_sk_socks_show(); break; case TIPC_CMD_SHOW_STATS: rep_tlv_buf = tipc_show_stats(); diff --git a/net/tipc/core.c b/net/tipc/core.c index 676d18015dd8..a5737b8407dd 100644 --- a/net/tipc/core.c +++ b/net/tipc/core.c @@ -35,11 +35,10 @@ */ #include "core.h" -#include "ref.h" #include "name_table.h" #include "subscr.h" #include "config.h" -#include "port.h" +#include "socket.h" #include <linux/module.h> @@ -85,7 +84,7 @@ static void tipc_core_stop(void) tipc_netlink_stop(); tipc_subscr_stop(); tipc_nametbl_stop(); - tipc_ref_table_stop(); + tipc_sk_ref_table_stop(); tipc_socket_stop(); tipc_unregister_sysctl(); } @@ -99,7 +98,7 @@ static int tipc_core_start(void) get_random_bytes(&tipc_random, sizeof(tipc_random)); - err = tipc_ref_table_init(tipc_max_ports, tipc_random); + err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random); if (err) goto out_reftbl; @@ -139,7 +138,7 @@ out_socket: out_netlink: tipc_nametbl_stop(); out_nametbl: - tipc_ref_table_stop(); + tipc_sk_ref_table_stop(); out_reftbl: return err; } diff --git a/net/tipc/core.h b/net/tipc/core.h index bb26ed1ee966..f773b148722f 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -81,6 +81,7 @@ extern u32 tipc_own_addr __read_mostly; extern int tipc_max_ports __read_mostly; extern int tipc_net_id __read_mostly; extern int sysctl_tipc_rmem[3] __read_mostly; 
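The tipc_skb_cb fields added here (chain_sz, chain_imp) let a sender record, in the head buffer of an outgoing chain, how many buffers the chain holds and at what importance level; the link code reads these back when parking a congested sender. A small sketch of the tagging side, in the TIPC core.h context shown above; example_tag_chain is illustrative only:

    /* Record chain metadata in the head skb's TIPC control block. */
    static void example_tag_chain(struct sk_buff *head, u16 nbufs, u16 imp)
    {
            TIPC_SKB_CB(head)->chain_sz = nbufs;    /* buffers in the chain */
            TIPC_SKB_CB(head)->chain_imp = imp;     /* message importance */
    }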
+extern int sysctl_tipc_named_timeout __read_mostly; /* * Other global variables @@ -187,8 +188,11 @@ static inline void k_term_timer(struct timer_list *timer) struct tipc_skb_cb { void *handle; - bool deferred; struct sk_buff *tail; + bool deferred; + bool wakeup_pending; + u16 chain_sz; + u16 chain_imp; }; #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) diff --git a/net/tipc/link.c b/net/tipc/link.c index fb1485dc6736..65410e18b8a6 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -36,7 +36,6 @@ #include "core.h" #include "link.h" -#include "port.h" #include "socket.h" #include "name_distr.h" #include "discover.h" @@ -275,7 +274,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, link_init_max_pkt(l_ptr); l_ptr->next_out_no = 1; - INIT_LIST_HEAD(&l_ptr->waiting_ports); + __skb_queue_head_init(&l_ptr->waiting_sks); link_reset_statistics(l_ptr); @@ -322,66 +321,47 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down) } /** - * link_schedule_port - schedule port for deferred sending - * @l_ptr: pointer to link - * @origport: reference to sending port - * @sz: amount of data to be sent - * - * Schedules port for renewed sending of messages after link congestion - * has abated. + * link_schedule_user - schedule user for wakeup after congestion + * @link: congested link + * @oport: sending port + * @chain_sz: size of buffer chain that was attempted sent + * @imp: importance of message attempted sent + * Create pseudo msg to send back to user when congestion abates */ -static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) +static bool link_schedule_user(struct tipc_link *link, u32 oport, + uint chain_sz, uint imp) { - struct tipc_port *p_ptr; - struct tipc_sock *tsk; + struct sk_buff *buf; - spin_lock_bh(&tipc_port_list_lock); - p_ptr = tipc_port_lock(origport); - if (p_ptr) { - if (!list_empty(&p_ptr->wait_list)) - goto exit; - tsk = tipc_port_to_sock(p_ptr); - tsk->link_cong = 1; - p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt); - list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports); - l_ptr->stats.link_congs++; -exit: - tipc_port_unlock(p_ptr); - } - spin_unlock_bh(&tipc_port_list_lock); - return -ELINKCONG; + buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr, + tipc_own_addr, oport, 0, 0); + if (!buf) + return false; + TIPC_SKB_CB(buf)->chain_sz = chain_sz; + TIPC_SKB_CB(buf)->chain_imp = imp; + __skb_queue_tail(&link->waiting_sks, buf); + link->stats.link_congs++; + return true; } -void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all) +/** + * link_prepare_wakeup - prepare users for wakeup after congestion + * @link: congested link + * Move a number of waiting users, as permitted by available space in + * the send queue, from link wait queue to node wait queue for wakeup + */ +static void link_prepare_wakeup(struct tipc_link *link) { - struct tipc_port *p_ptr; - struct tipc_sock *tsk; - struct tipc_port *temp_p_ptr; - int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size; - - if (all) - win = 100000; - if (win <= 0) - return; - if (!spin_trylock_bh(&tipc_port_list_lock)) - return; - if (link_congested(l_ptr)) - goto exit; - list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, - wait_list) { - if (win <= 0) + struct sk_buff_head *wq = &link->waiting_sks; + struct sk_buff *buf; + uint pend_qsz = link->out_queue_size; + + for (buf = skb_peek(wq); buf; buf = skb_peek(wq)) { + if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(buf)->chain_imp]) break; - tsk = 
tipc_port_to_sock(p_ptr); - list_del_init(&p_ptr->wait_list); - spin_lock_bh(p_ptr->lock); - tsk->link_cong = 0; - tipc_sock_wakeup(tsk); - win -= p_ptr->waiting_pkts; - spin_unlock_bh(p_ptr->lock); + pend_qsz += TIPC_SKB_CB(buf)->chain_sz; + __skb_queue_tail(&link->owner->waiting_sks, __skb_dequeue(wq)); } - -exit: - spin_unlock_bh(&tipc_port_list_lock); } /** @@ -423,6 +403,7 @@ void tipc_link_reset(struct tipc_link *l_ptr) u32 prev_state = l_ptr->state; u32 checkpoint = l_ptr->next_in_no; int was_active_link = tipc_link_is_active(l_ptr); + struct tipc_node *owner = l_ptr->owner; msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff)); @@ -450,9 +431,10 @@ void tipc_link_reset(struct tipc_link *l_ptr) kfree_skb(l_ptr->proto_msg_queue); l_ptr->proto_msg_queue = NULL; kfree_skb_list(l_ptr->oldest_deferred_in); - if (!list_empty(&l_ptr->waiting_ports)) - tipc_link_wakeup_ports(l_ptr, 1); - + if (!skb_queue_empty(&l_ptr->waiting_sks)) { + skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks); + owner->action_flags |= TIPC_WAKEUP_USERS; + } l_ptr->retransm_queue_head = 0; l_ptr->retransm_queue_size = 0; l_ptr->last_out = NULL; @@ -688,19 +670,23 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event) static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); - uint psz = msg_size(msg); uint imp = tipc_msg_tot_importance(msg); u32 oport = msg_tot_origport(msg); - if (likely(imp <= TIPC_CRITICAL_IMPORTANCE)) { - if (!msg_errcode(msg) && !msg_reroute_cnt(msg)) { - link_schedule_port(link, oport, psz); - return -ELINKCONG; - } - } else { + if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) { pr_warn("%s<%s>, send queue full", link_rst_msg, link->name); tipc_link_reset(link); + goto drop; } + if (unlikely(msg_errcode(msg))) + goto drop; + if (unlikely(msg_reroute_cnt(msg))) + goto drop; + if (TIPC_SKB_CB(buf)->wakeup_pending) + return -ELINKCONG; + if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp)) + return -ELINKCONG; +drop: kfree_skb_list(buf); return -EHOSTUNREACH; } @@ -1202,8 +1188,10 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) if (unlikely(l_ptr->next_out)) tipc_link_push_queue(l_ptr); - if (unlikely(!list_empty(&l_ptr->waiting_ports))) - tipc_link_wakeup_ports(l_ptr, 0); + if (released && !skb_queue_empty(&l_ptr->waiting_sks)) { + link_prepare_wakeup(l_ptr); + l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS; + } /* Process the incoming packet */ if (unlikely(!link_working_working(l_ptr))) { diff --git a/net/tipc/link.h b/net/tipc/link.h index 782983ccd323..b567a3427fda 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -1,7 +1,7 @@ /* * net/tipc/link.h: Include file for TIPC link code * - * Copyright (c) 1995-2006, 2013, Ericsson AB + * Copyright (c) 1995-2006, 2013-2014, Ericsson AB * Copyright (c) 2004-2005, 2010-2011, Wind River Systems * All rights reserved. 
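The new wakeup scheme above replaces the old port wait list entirely: a congested sender gets a SOCK_WAKEUP pseudo message queued on the link, link_prepare_wakeup() later moves as many of those as the queue limits permit onto the owning node, and tipc_node_unlock() delivers them back through tipc_sk_rcv(). A rough sketch of the hand-off step, ignoring the per-importance limit check the real link_prepare_wakeup() applies:

    #include <linux/skbuff.h>

    static void example_move_wakeups(struct tipc_link *link)
    {
            struct sk_buff *buf;

            /* Hand queued wakeup messages from the link to its node... */
            while ((buf = __skb_dequeue(&link->waiting_sks)) != NULL)
                    __skb_queue_tail(&link->owner->waiting_sks, buf);

            /* ...and ask tipc_node_unlock() to deliver them to the sockets. */
            link->owner->action_flags |= TIPC_WAKEUP_USERS;
    }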
* @@ -133,7 +133,7 @@ struct tipc_stats { * @retransm_queue_size: number of messages to retransmit * @retransm_queue_head: sequence number of first message to retransmit * @next_out: ptr to first unsent outbound message in queue - * @waiting_ports: linked list of ports waiting for link congestion to abate + * @waiting_sks: linked list of sockets waiting for link congestion to abate * @long_msg_seq_no: next identifier to use for outbound fragmented messages * @reasm_buf: head of partially reassembled inbound message fragments * @stats: collects statistics regarding link activity @@ -194,7 +194,7 @@ struct tipc_link { u32 retransm_queue_size; u32 retransm_queue_head; struct sk_buff *next_out; - struct list_head waiting_ports; + struct sk_buff_head waiting_sks; /* Fragmentation/reassembly */ u32 long_msg_seq_no; @@ -235,7 +235,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, void tipc_link_push_queue(struct tipc_link *l_ptr); u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, struct sk_buff *buf); -void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all); void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *start, u32 retransmits); diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 9680be6d388a..74745a47d72a 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -56,8 +56,35 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, msg_set_size(m, hsize); msg_set_prevnode(m, tipc_own_addr); msg_set_type(m, type); - msg_set_orignode(m, tipc_own_addr); - msg_set_destnode(m, destnode); + if (hsize > SHORT_H_SIZE) { + msg_set_orignode(m, tipc_own_addr); + msg_set_destnode(m, destnode); + } +} + +struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, + uint data_sz, u32 dnode, u32 onode, + u32 dport, u32 oport, int errcode) +{ + struct tipc_msg *msg; + struct sk_buff *buf; + + buf = tipc_buf_acquire(hdr_sz + data_sz); + if (unlikely(!buf)) + return NULL; + + msg = buf_msg(buf); + tipc_msg_init(msg, user, type, hdr_sz, dnode); + msg_set_size(msg, hdr_sz + data_sz); + msg_set_prevnode(msg, onode); + msg_set_origport(msg, oport); + msg_set_destport(msg, dport); + msg_set_errcode(msg, errcode); + if (hdr_sz > SHORT_H_SIZE) { + msg_set_orignode(msg, onode); + msg_set_destnode(msg, dnode); + } + return buf; } /* tipc_buf_append(): Append a buffer to the fragment list of another buffer @@ -155,7 +182,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, struct sk_buff *buf, *prev; char *pktpos; int rc; - + uint chain_sz = 0; msg_set_size(mhdr, msz); /* No fragmentation needed? 
*/ @@ -166,6 +193,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, return -ENOMEM; skb_copy_to_linear_data(buf, mhdr, mhsz); pktpos = buf->data + mhsz; + TIPC_SKB_CB(buf)->chain_sz = 1; if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz)) return dsz; rc = -EFAULT; @@ -182,6 +210,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, *chain = buf = tipc_buf_acquire(pktmax); if (!buf) return -ENOMEM; + chain_sz = 1; pktpos = buf->data; skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); pktpos += INT_H_SIZE; @@ -215,6 +244,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, rc = -ENOMEM; goto error; } + chain_sz++; prev->next = buf; msg_set_type(&pkthdr, FRAGMENT); msg_set_size(&pkthdr, pktsz); @@ -224,7 +254,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, pktrem = pktsz - INT_H_SIZE; } while (1); - + TIPC_SKB_CB(*chain)->chain_sz = chain_sz; msg_set_type(buf_msg(buf), LAST_FRAGMENT); return dsz; error: diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 462fa194a6af..0ea7b695ac4d 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -442,6 +442,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m) #define NAME_DISTRIBUTOR 11 #define MSG_FRAGMENTER 12 #define LINK_CONFIG 13 +#define SOCK_WAKEUP 14 /* pseudo user */ /* * Connection management protocol message types @@ -732,6 +733,10 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode); void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, u32 destnode); +struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, + uint data_sz, u32 dnode, u32 onode, + u32 dport, u32 oport, int errcode); + int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu); diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index dcc15bcd5692..376d2bb51d8d 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -1,7 +1,7 @@ /* * net/tipc/name_distr.c: TIPC name distribution code * - * Copyright (c) 2000-2006, Ericsson AB + * Copyright (c) 2000-2006, 2014, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * @@ -71,6 +71,21 @@ static struct publ_list *publ_lists[] = { }; +int sysctl_tipc_named_timeout __read_mostly = 2000; + +/** + * struct tipc_dist_queue - queue holding deferred name table updates + */ +static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue); + +struct distr_queue_item { + struct distr_item i; + u32 dtype; + u32 node; + unsigned long expires; + struct list_head next; +}; + /** * publ_to_item - add publication info to a publication message */ @@ -263,54 +278,105 @@ static void named_purge_publ(struct publication *publ) } /** + * tipc_update_nametbl - try to process a name table update and notify + * subscribers + * + * tipc_nametbl_lock must be held. + * Returns true if the update was applied, otherwise false.
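tipc_named_rcv() below no longer drops an update that cannot be applied immediately (for instance one that raced with a node-down event); the item is parked on tipc_dist_queue and retried until it either applies or sysctl_tipc_named_timeout milliseconds pass. The per-item retry decision reduces to something like this sketch; example_keep_deferred and example_deadline are illustrative only:

    #include <linux/jiffies.h>

    extern int sysctl_tipc_named_timeout;   /* declared in tipc's core.h */

    /* Deadline assigned to an update when it first fails to apply. */
    static unsigned long example_deadline(void)
    {
            return jiffies + msecs_to_jiffies(sysctl_tipc_named_timeout);
    }

    /* Keep a deferred name table update queued for another retry pass? */
    static bool example_keep_deferred(unsigned long deadline, bool applied)
    {
            return !applied && time_before(jiffies, deadline);
    }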
+ */ +static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype) +{ + struct publication *publ = NULL; + + if (dtype == PUBLICATION) { + publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower), + ntohl(i->upper), + TIPC_CLUSTER_SCOPE, node, + ntohl(i->ref), ntohl(i->key)); + if (publ) { + tipc_nodesub_subscribe(&publ->subscr, node, publ, + (net_ev_handler) + named_purge_publ); + return true; + } + } else if (dtype == WITHDRAWAL) { + publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower), + node, ntohl(i->ref), + ntohl(i->key)); + if (publ) { + tipc_nodesub_unsubscribe(&publ->subscr); + kfree(publ); + return true; + } + } else { + pr_warn("Unrecognized name table message received\n"); + } + return false; +} + +/** + * tipc_named_add_backlog - add a failed name table update to the backlog + * + */ +static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node) +{ + struct distr_queue_item *e; + unsigned long now = get_jiffies_64(); + + e = kzalloc(sizeof(*e), GFP_ATOMIC); + if (!e) + return; + e->dtype = type; + e->node = node; + e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout); + memcpy(e, i, sizeof(*i)); + list_add_tail(&e->next, &tipc_dist_queue); +} + +/** + * tipc_named_process_backlog - try to process any pending name table updates + * from the network. + */ +void tipc_named_process_backlog(void) +{ + struct distr_queue_item *e, *tmp; + char addr[16]; + unsigned long now = get_jiffies_64(); + + list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { + if (time_after(e->expires, now)) { + if (!tipc_update_nametbl(&e->i, e->node, e->dtype)) + continue; + } else { + tipc_addr_string_fill(addr, e->node); + pr_warn_ratelimited("Dropping name table update (%d) of {%u, %u, %u} from %s key=%u\n", + e->dtype, ntohl(e->i.type), + ntohl(e->i.lower), + ntohl(e->i.upper), + addr, ntohl(e->i.key)); + } + list_del(&e->next); + kfree(e); + } +} + +/** * tipc_named_rcv - process name table update message sent by another node */ void tipc_named_rcv(struct sk_buff *buf) { - struct publication *publ; struct tipc_msg *msg = buf_msg(buf); struct distr_item *item = (struct distr_item *)msg_data(msg); u32 count = msg_data_sz(msg) / ITEM_SIZE; + u32 node = msg_orignode(msg); write_lock_bh(&tipc_nametbl_lock); while (count--) { - if (msg_type(msg) == PUBLICATION) { - publ = tipc_nametbl_insert_publ(ntohl(item->type), - ntohl(item->lower), - ntohl(item->upper), - TIPC_CLUSTER_SCOPE, - msg_orignode(msg), - ntohl(item->ref), - ntohl(item->key)); - if (publ) { - tipc_nodesub_subscribe(&publ->subscr, - msg_orignode(msg), - publ, - (net_ev_handler) - named_purge_publ); - } - } else if (msg_type(msg) == WITHDRAWAL) { - publ = tipc_nametbl_remove_publ(ntohl(item->type), - ntohl(item->lower), - msg_orignode(msg), - ntohl(item->ref), - ntohl(item->key)); - - if (publ) { - tipc_nodesub_unsubscribe(&publ->subscr); - kfree(publ); - } else { - pr_err("Unable to remove publication by node 0x%x\n" - " (type=%u, lower=%u, ref=%u, key=%u)\n", - msg_orignode(msg), ntohl(item->type), - ntohl(item->lower), ntohl(item->ref), - ntohl(item->key)); - } - } else { - pr_warn("Unrecognized name table message received\n"); - } + if (!tipc_update_nametbl(item, node, msg_type(msg))) + tipc_named_add_backlog(item, msg_type(msg), node); item++; } + tipc_named_process_backlog(); write_unlock_bh(&tipc_nametbl_lock); kfree_skb(buf); } diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h index 8afe32b7fc9a..b9e75feb3434 100644 --- a/net/tipc/name_distr.h +++ 
b/net/tipc/name_distr.h @@ -73,5 +73,6 @@ void named_cluster_distribute(struct sk_buff *buf); void tipc_named_node_up(u32 dnode); void tipc_named_rcv(struct sk_buff *buf); void tipc_named_reinit(void); +void tipc_named_process_backlog(void); #endif diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 9d7d37d95187..3a6a0a7c0759 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -39,7 +39,6 @@ #include "name_table.h" #include "name_distr.h" #include "subscr.h" -#include "port.h" #define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ @@ -262,8 +261,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, /* Lower end overlaps existing entry => need an exact match */ if ((sseq->lower != lower) || (sseq->upper != upper)) { - pr_warn("Cannot publish {%u,%u,%u}, overlap error\n", - type, lower, upper); return NULL; } @@ -285,8 +282,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, /* Fail if upper end overlaps into an existing entry */ if ((inspos < nseq->first_free) && (upper >= nseq->sseqs[inspos].lower)) { - pr_warn("Cannot publish {%u,%u,%u}, overlap error\n", - type, lower, upper); return NULL; } @@ -678,6 +673,8 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, if (likely(publ)) { table.local_publ_count++; buf = tipc_named_publish(publ); + /* Any pending external events? */ + tipc_named_process_backlog(); } write_unlock_bh(&tipc_nametbl_lock); @@ -699,6 +696,8 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) if (likely(publ)) { table.local_publ_count--; buf = tipc_named_withdraw(publ); + /* Any pending external events? */ + tipc_named_process_backlog(); write_unlock_bh(&tipc_nametbl_lock); list_del_init(&publ->pport_list); kfree(publ); diff --git a/net/tipc/net.c b/net/tipc/net.c index 7fcc94998fea..93b9944a6a8b 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -38,7 +38,6 @@ #include "net.h" #include "name_distr.h" #include "subscr.h" -#include "port.h" #include "socket.h" #include "node.h" #include "config.h" @@ -111,7 +110,7 @@ int tipc_net_start(u32 addr) tipc_own_addr = addr; tipc_named_reinit(); - tipc_port_reinit(); + tipc_sk_reinit(); res = tipc_bclink_init(); if (res) return res; diff --git a/net/tipc/node.c b/net/tipc/node.c index f7069299943f..17e6378c4dfe 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -38,6 +38,7 @@ #include "config.h" #include "node.h" #include "name_distr.h" +#include "socket.h" #define NODE_HTABLE_SIZE 512 @@ -50,6 +51,13 @@ static u32 tipc_num_nodes; static u32 tipc_num_links; static DEFINE_SPINLOCK(node_list_lock); +struct tipc_sock_conn { + u32 port; + u32 peer_port; + u32 peer_node; + struct list_head list; +}; + /* * A trivial power-of-two bitmask technique is used for speed, since this * operation is done for every incoming TIPC packet. 
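[Editor's sketch, not part of the patch] The node.c comment above relies on the hash table size being a power of two, which lets a bitwise AND replace the modulo in the per-packet hash. Mirroring the existing NODE_HTABLE_SIZE definition:

#define NODE_HTABLE_SIZE 512	/* must stay a power of two */

static unsigned int node_hashfn(unsigned int addr)
{
	return addr & (NODE_HTABLE_SIZE - 1);	/* same result as addr % NODE_HTABLE_SIZE */
}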
The number of hash table @@ -100,6 +108,8 @@ struct tipc_node *tipc_node_create(u32 addr) INIT_HLIST_NODE(&n_ptr->hash); INIT_LIST_HEAD(&n_ptr->list); INIT_LIST_HEAD(&n_ptr->nsub); + INIT_LIST_HEAD(&n_ptr->conn_sks); + __skb_queue_head_init(&n_ptr->waiting_sks); hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); @@ -136,6 +146,71 @@ void tipc_node_stop(void) spin_unlock_bh(&node_list_lock); } +int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port) +{ + struct tipc_node *node; + struct tipc_sock_conn *conn; + + if (in_own_node(dnode)) + return 0; + + node = tipc_node_find(dnode); + if (!node) { + pr_warn("Connecting sock to node 0x%x failed\n", dnode); + return -EHOSTUNREACH; + } + conn = kmalloc(sizeof(*conn), GFP_ATOMIC); + if (!conn) + return -EHOSTUNREACH; + conn->peer_node = dnode; + conn->port = port; + conn->peer_port = peer_port; + + tipc_node_lock(node); + list_add_tail(&conn->list, &node->conn_sks); + tipc_node_unlock(node); + return 0; +} + +void tipc_node_remove_conn(u32 dnode, u32 port) +{ + struct tipc_node *node; + struct tipc_sock_conn *conn, *safe; + + if (in_own_node(dnode)) + return; + + node = tipc_node_find(dnode); + if (!node) + return; + + tipc_node_lock(node); + list_for_each_entry_safe(conn, safe, &node->conn_sks, list) { + if (port != conn->port) + continue; + list_del(&conn->list); + kfree(conn); + } + tipc_node_unlock(node); +} + +void tipc_node_abort_sock_conns(struct list_head *conns) +{ + struct tipc_sock_conn *conn, *safe; + struct sk_buff *buf; + + list_for_each_entry_safe(conn, safe, conns, list) { + buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + SHORT_H_SIZE, 0, tipc_own_addr, + conn->peer_node, conn->port, + conn->peer_port, TIPC_ERR_NO_NODE); + if (likely(buf)) + tipc_sk_rcv(buf); + list_del(&conn->list); + kfree(conn); + } +} + /** * tipc_node_link_up - handle addition of link * @@ -474,6 +549,8 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len) void tipc_node_unlock(struct tipc_node *node) { LIST_HEAD(nsub_list); + LIST_HEAD(conn_sks); + struct sk_buff_head waiting_sks; u32 addr = 0; if (likely(!node->action_flags)) { @@ -481,8 +558,14 @@ void tipc_node_unlock(struct tipc_node *node) return; } + __skb_queue_head_init(&waiting_sks); + if (node->action_flags & TIPC_WAKEUP_USERS) { + skb_queue_splice_init(&node->waiting_sks, &waiting_sks); + node->action_flags &= ~TIPC_WAKEUP_USERS; + } if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) { list_replace_init(&node->nsub, &nsub_list); + list_replace_init(&node->conn_sks, &conn_sks); node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN; } if (node->action_flags & TIPC_NOTIFY_NODE_UP) { @@ -491,8 +574,15 @@ void tipc_node_unlock(struct tipc_node *node) } spin_unlock_bh(&node->lock); + while (!skb_queue_empty(&waiting_sks)) + tipc_sk_rcv(__skb_dequeue(&waiting_sks)); + + if (!list_empty(&conn_sks)) + tipc_node_abort_sock_conns(&conn_sks); + if (!list_empty(&nsub_list)) tipc_nodesub_notify(&nsub_list); + if (addr) tipc_named_node_up(addr); } diff --git a/net/tipc/node.h b/net/tipc/node.h index b61716a8218e..522d6f3157b3 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -58,7 +58,8 @@ enum { TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2), TIPC_NOTIFY_NODE_DOWN = (1 << 3), - TIPC_NOTIFY_NODE_UP = (1 << 4) + TIPC_NOTIFY_NODE_UP = (1 << 4), + TIPC_WAKEUP_USERS = (1 << 5) }; /** @@ -115,6 +116,8 @@ struct tipc_node { int working_links; u32 signature; struct list_head nsub; + struct sk_buff_head waiting_sks; + struct list_head 
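[Editor's sketch, not part of the patch] tipc_node_unlock() above follows a collect-then-act pattern: everything that must happen as a consequence of a state change is moved off the node while its spinlock is still held (skb_queue_splice_init(), list_replace_init()), and only executed after spin_unlock_bh(), so wakeups and notifications never run under the node lock. A minimal pthread rendering of the same idea, names hypothetical:

#include <pthread.h>

struct work { struct work *next; };

struct node_demo {
	pthread_mutex_t lock;
	struct work *pending;	/* filled by other contexts under lock */
};

static void do_work(struct work *w)
{
	(void)w;		/* e.g. wake a blocked socket */
}

/* Caller holds n->lock on entry, exactly like tipc_node_unlock(). */
static void node_unlock_demo(struct node_demo *n)
{
	struct work *batch = n->pending;	/* detach the whole batch... */

	n->pending = NULL;
	pthread_mutex_unlock(&n->lock);

	while (batch) {				/* ...and run it lock-free */
		struct work *w = batch;

		batch = w->next;
		do_work(w);
	}
}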
conn_sks; struct rcu_head rcu; }; @@ -133,6 +136,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len); void tipc_node_unlock(struct tipc_node *node); +int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port); +void tipc_node_remove_conn(u32 dnode, u32 port); static inline void tipc_node_lock(struct tipc_node *node) { diff --git a/net/tipc/port.c b/net/tipc/port.c deleted file mode 100644 index 7e096a5e7701..000000000000 --- a/net/tipc/port.c +++ /dev/null @@ -1,514 +0,0 @@ -/* - * net/tipc/port.c: TIPC port code - * - * Copyright (c) 1992-2007, 2014, Ericsson AB - * Copyright (c) 2004-2008, 2010-2013, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#include "core.h" -#include "config.h" -#include "port.h" -#include "name_table.h" -#include "socket.h" - -/* Connection management: */ -#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */ - -#define MAX_REJECT_SIZE 1024 - -DEFINE_SPINLOCK(tipc_port_list_lock); - -static LIST_HEAD(ports); -static void port_handle_node_down(unsigned long ref); -static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err); -static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err); -static void port_timeout(unsigned long ref); - -/** - * tipc_port_peer_msg - verify message was sent by connected port's peer - * - * Handles cases where the node's network address has changed from - * the default of <0.0.0> to its configured setting. 
- */ -int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg) -{ - u32 peernode; - u32 orignode; - - if (msg_origport(msg) != tipc_port_peerport(p_ptr)) - return 0; - - orignode = msg_orignode(msg); - peernode = tipc_port_peernode(p_ptr); - return (orignode == peernode) || - (!orignode && (peernode == tipc_own_addr)) || - (!peernode && (orignode == tipc_own_addr)); -} - -/* tipc_port_init - intiate TIPC port and lock it - * - * Returns obtained reference if initialization is successful, zero otherwise - */ -u32 tipc_port_init(struct tipc_port *p_ptr, - const unsigned int importance) -{ - struct tipc_msg *msg; - u32 ref; - - ref = tipc_ref_acquire(p_ptr, &p_ptr->lock); - if (!ref) { - pr_warn("Port registration failed, ref. table exhausted\n"); - return 0; - } - - p_ptr->max_pkt = MAX_PKT_DEFAULT; - p_ptr->ref = ref; - INIT_LIST_HEAD(&p_ptr->wait_list); - INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); - k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); - INIT_LIST_HEAD(&p_ptr->publications); - INIT_LIST_HEAD(&p_ptr->port_list); - - /* - * Must hold port list lock while initializing message header template - * to ensure a change to node's own network address doesn't result - * in template containing out-dated network address information - */ - spin_lock_bh(&tipc_port_list_lock); - msg = &p_ptr->phdr; - tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0); - msg_set_origport(msg, ref); - list_add_tail(&p_ptr->port_list, &ports); - spin_unlock_bh(&tipc_port_list_lock); - return ref; -} - -void tipc_port_destroy(struct tipc_port *p_ptr) -{ - struct sk_buff *buf = NULL; - struct tipc_msg *msg = NULL; - u32 peer; - - tipc_withdraw(p_ptr, 0, NULL); - - spin_lock_bh(p_ptr->lock); - tipc_ref_discard(p_ptr->ref); - spin_unlock_bh(p_ptr->lock); - - k_cancel_timer(&p_ptr->timer); - if (p_ptr->connected) { - buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT); - tipc_nodesub_unsubscribe(&p_ptr->subscription); - msg = buf_msg(buf); - peer = msg_destnode(msg); - tipc_link_xmit(buf, peer, msg_link_selector(msg)); - } - spin_lock_bh(&tipc_port_list_lock); - list_del(&p_ptr->port_list); - list_del(&p_ptr->wait_list); - spin_unlock_bh(&tipc_port_list_lock); - k_term_timer(&p_ptr->timer); -} - -/* - * port_build_proto_msg(): create connection protocol message for port - * - * On entry the port must be locked and connected. - */ -static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr, - u32 type, u32 ack) -{ - struct sk_buff *buf; - struct tipc_msg *msg; - - buf = tipc_buf_acquire(INT_H_SIZE); - if (buf) { - msg = buf_msg(buf); - tipc_msg_init(msg, CONN_MANAGER, type, INT_H_SIZE, - tipc_port_peernode(p_ptr)); - msg_set_destport(msg, tipc_port_peerport(p_ptr)); - msg_set_origport(msg, p_ptr->ref); - msg_set_msgcnt(msg, ack); - buf->next = NULL; - } - return buf; -} - -static void port_timeout(unsigned long ref) -{ - struct tipc_port *p_ptr = tipc_port_lock(ref); - struct sk_buff *buf = NULL; - struct tipc_msg *msg = NULL; - - if (!p_ptr) - return; - - if (!p_ptr->connected) { - tipc_port_unlock(p_ptr); - return; - } - - /* Last probe answered ? 
*/ - if (p_ptr->probing_state == TIPC_CONN_PROBING) { - buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT); - } else { - buf = port_build_proto_msg(p_ptr, CONN_PROBE, 0); - p_ptr->probing_state = TIPC_CONN_PROBING; - k_start_timer(&p_ptr->timer, p_ptr->probing_interval); - } - tipc_port_unlock(p_ptr); - msg = buf_msg(buf); - tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg)); -} - - -static void port_handle_node_down(unsigned long ref) -{ - struct tipc_port *p_ptr = tipc_port_lock(ref); - struct sk_buff *buf = NULL; - struct tipc_msg *msg = NULL; - - if (!p_ptr) - return; - buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE); - tipc_port_unlock(p_ptr); - msg = buf_msg(buf); - tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg)); -} - - -static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err) -{ - struct sk_buff *buf = port_build_peer_abort_msg(p_ptr, err); - - if (buf) { - struct tipc_msg *msg = buf_msg(buf); - msg_swap_words(msg, 4, 5); - msg_swap_words(msg, 6, 7); - buf->next = NULL; - } - return buf; -} - - -static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err) -{ - struct sk_buff *buf; - struct tipc_msg *msg; - u32 imp; - - if (!p_ptr->connected) - return NULL; - - buf = tipc_buf_acquire(BASIC_H_SIZE); - if (buf) { - msg = buf_msg(buf); - memcpy(msg, &p_ptr->phdr, BASIC_H_SIZE); - msg_set_hdr_sz(msg, BASIC_H_SIZE); - msg_set_size(msg, BASIC_H_SIZE); - imp = msg_importance(msg); - if (imp < TIPC_CRITICAL_IMPORTANCE) - msg_set_importance(msg, ++imp); - msg_set_errcode(msg, err); - buf->next = NULL; - } - return buf; -} - -static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id) -{ - struct publication *publ; - int ret; - - if (full_id) - ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:", - tipc_zone(tipc_own_addr), - tipc_cluster(tipc_own_addr), - tipc_node(tipc_own_addr), p_ptr->ref); - else - ret = tipc_snprintf(buf, len, "%-10u:", p_ptr->ref); - - if (p_ptr->connected) { - u32 dport = tipc_port_peerport(p_ptr); - u32 destnode = tipc_port_peernode(p_ptr); - - ret += tipc_snprintf(buf + ret, len - ret, - " connected to <%u.%u.%u:%u>", - tipc_zone(destnode), - tipc_cluster(destnode), - tipc_node(destnode), dport); - if (p_ptr->conn_type != 0) - ret += tipc_snprintf(buf + ret, len - ret, - " via {%u,%u}", p_ptr->conn_type, - p_ptr->conn_instance); - } else if (p_ptr->published) { - ret += tipc_snprintf(buf + ret, len - ret, " bound to"); - list_for_each_entry(publ, &p_ptr->publications, pport_list) { - if (publ->lower == publ->upper) - ret += tipc_snprintf(buf + ret, len - ret, - " {%u,%u}", publ->type, - publ->lower); - else - ret += tipc_snprintf(buf + ret, len - ret, - " {%u,%u,%u}", publ->type, - publ->lower, publ->upper); - } - } - ret += tipc_snprintf(buf + ret, len - ret, "\n"); - return ret; -} - -struct sk_buff *tipc_port_get_ports(void) -{ - struct sk_buff *buf; - struct tlv_desc *rep_tlv; - char *pb; - int pb_len; - struct tipc_port *p_ptr; - int str_len = 0; - - buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); - if (!buf) - return NULL; - rep_tlv = (struct tlv_desc *)buf->data; - pb = TLV_DATA(rep_tlv); - pb_len = ULTRA_STRING_MAX_LEN; - - spin_lock_bh(&tipc_port_list_lock); - list_for_each_entry(p_ptr, &ports, port_list) { - spin_lock_bh(p_ptr->lock); - str_len += port_print(p_ptr, pb, pb_len, 0); - spin_unlock_bh(p_ptr->lock); - } - spin_unlock_bh(&tipc_port_list_lock); - str_len += 1; /* for "\0" */ - skb_put(buf, TLV_SPACE(str_len)); - TLV_SET(rep_tlv, 
TIPC_TLV_ULTRA_STRING, NULL, str_len); - - return buf; -} - -void tipc_port_reinit(void) -{ - struct tipc_port *p_ptr; - struct tipc_msg *msg; - - spin_lock_bh(&tipc_port_list_lock); - list_for_each_entry(p_ptr, &ports, port_list) { - msg = &p_ptr->phdr; - msg_set_prevnode(msg, tipc_own_addr); - msg_set_orignode(msg, tipc_own_addr); - } - spin_unlock_bh(&tipc_port_list_lock); -} - -void tipc_acknowledge(u32 ref, u32 ack) -{ - struct tipc_port *p_ptr; - struct sk_buff *buf = NULL; - struct tipc_msg *msg; - - p_ptr = tipc_port_lock(ref); - if (!p_ptr) - return; - if (p_ptr->connected) - buf = port_build_proto_msg(p_ptr, CONN_ACK, ack); - - tipc_port_unlock(p_ptr); - if (!buf) - return; - msg = buf_msg(buf); - tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg)); -} - -int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, - struct tipc_name_seq const *seq) -{ - struct publication *publ; - u32 key; - - if (p_ptr->connected) - return -EINVAL; - key = p_ptr->ref + p_ptr->pub_count + 1; - if (key == p_ptr->ref) - return -EADDRINUSE; - - publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, - scope, p_ptr->ref, key); - if (publ) { - list_add(&publ->pport_list, &p_ptr->publications); - p_ptr->pub_count++; - p_ptr->published = 1; - return 0; - } - return -EINVAL; -} - -int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, - struct tipc_name_seq const *seq) -{ - struct publication *publ; - struct publication *tpubl; - int res = -EINVAL; - - if (!seq) { - list_for_each_entry_safe(publ, tpubl, - &p_ptr->publications, pport_list) { - tipc_nametbl_withdraw(publ->type, publ->lower, - publ->ref, publ->key); - } - res = 0; - } else { - list_for_each_entry_safe(publ, tpubl, - &p_ptr->publications, pport_list) { - if (publ->scope != scope) - continue; - if (publ->type != seq->type) - continue; - if (publ->lower != seq->lower) - continue; - if (publ->upper != seq->upper) - break; - tipc_nametbl_withdraw(publ->type, publ->lower, - publ->ref, publ->key); - res = 0; - break; - } - } - if (list_empty(&p_ptr->publications)) - p_ptr->published = 0; - return res; -} - -int tipc_port_connect(u32 ref, struct tipc_portid const *peer) -{ - struct tipc_port *p_ptr; - int res; - - p_ptr = tipc_port_lock(ref); - if (!p_ptr) - return -EINVAL; - res = __tipc_port_connect(ref, p_ptr, peer); - tipc_port_unlock(p_ptr); - return res; -} - -/* - * __tipc_port_connect - connect to a remote peer - * - * Port must be locked. - */ -int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr, - struct tipc_portid const *peer) -{ - struct tipc_msg *msg; - int res = -EINVAL; - - if (p_ptr->published || p_ptr->connected) - goto exit; - if (!peer->ref) - goto exit; - - msg = &p_ptr->phdr; - msg_set_destnode(msg, peer->node); - msg_set_destport(msg, peer->ref); - msg_set_type(msg, TIPC_CONN_MSG); - msg_set_lookup_scope(msg, 0); - msg_set_hdr_sz(msg, SHORT_H_SIZE); - - p_ptr->probing_interval = PROBING_INTERVAL; - p_ptr->probing_state = TIPC_CONN_OK; - p_ptr->connected = 1; - k_start_timer(&p_ptr->timer, p_ptr->probing_interval); - - tipc_nodesub_subscribe(&p_ptr->subscription, peer->node, - (void *)(unsigned long)ref, - (net_ev_handler)port_handle_node_down); - res = 0; -exit: - p_ptr->max_pkt = tipc_node_get_mtu(peer->node, ref); - return res; -} - -/* - * __tipc_disconnect - disconnect port from peer - * - * Port must be locked. - */ -int __tipc_port_disconnect(struct tipc_port *tp_ptr) -{ - if (tp_ptr->connected) { - tp_ptr->connected = 0; - /* let timer expire on it's own to avoid deadlock! 
*/ - tipc_nodesub_unsubscribe(&tp_ptr->subscription); - return 0; - } - - return -ENOTCONN; -} - -/* - * tipc_port_disconnect(): Disconnect port form peer. - * This is a node local operation. - */ -int tipc_port_disconnect(u32 ref) -{ - struct tipc_port *p_ptr; - int res; - - p_ptr = tipc_port_lock(ref); - if (!p_ptr) - return -EINVAL; - res = __tipc_port_disconnect(p_ptr); - tipc_port_unlock(p_ptr); - return res; -} - -/* - * tipc_port_shutdown(): Send a SHUTDOWN msg to peer and disconnect - */ -int tipc_port_shutdown(u32 ref) -{ - struct tipc_msg *msg; - struct tipc_port *p_ptr; - struct sk_buff *buf = NULL; - - p_ptr = tipc_port_lock(ref); - if (!p_ptr) - return -EINVAL; - - buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN); - tipc_port_unlock(p_ptr); - msg = buf_msg(buf); - tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg)); - return tipc_port_disconnect(ref); -} diff --git a/net/tipc/port.h b/net/tipc/port.h deleted file mode 100644 index 3087da39ee47..000000000000 --- a/net/tipc/port.h +++ /dev/null @@ -1,190 +0,0 @@ -/* - * net/tipc/port.h: Include file for TIPC port code - * - * Copyright (c) 1994-2007, 2014, Ericsson AB - * Copyright (c) 2004-2007, 2010-2013, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _TIPC_PORT_H -#define _TIPC_PORT_H - -#include "ref.h" -#include "net.h" -#include "msg.h" -#include "node_subscr.h" - -#define TIPC_CONNACK_INTV 256 -#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) -#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \ - SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) - -/** - * struct tipc_port - TIPC port structure - * @lock: pointer to spinlock for controlling access to port - * @connected: non-zero if port is currently connected to a peer port - * @conn_type: TIPC type used when connection was established - * @conn_instance: TIPC instance used when connection was established - * @published: non-zero if port has one or more associated names - * @max_pkt: maximum packet size "hint" used when building messages sent by port - * @ref: unique reference to port in TIPC object registry - * @phdr: preformatted message header used when sending messages - * @port_list: adjacent ports in TIPC's global list of ports - * @wait_list: adjacent ports in list of ports waiting on link congestion - * @waiting_pkts: - * @publications: list of publications for port - * @pub_count: total # of publications port has made during its lifetime - * @probing_state: - * @probing_interval: - * @timer_ref: - * @subscription: "node down" subscription used to terminate failed connections - */ -struct tipc_port { - spinlock_t *lock; - int connected; - u32 conn_type; - u32 conn_instance; - int published; - u32 max_pkt; - u32 ref; - struct tipc_msg phdr; - struct list_head port_list; - struct list_head wait_list; - u32 waiting_pkts; - struct list_head publications; - u32 pub_count; - u32 probing_state; - u32 probing_interval; - struct timer_list timer; - struct tipc_node_subscr subscription; -}; - -extern spinlock_t tipc_port_list_lock; -struct tipc_port_list; - -/* - * TIPC port manipulation routines - */ -u32 tipc_port_init(struct tipc_port *p_ptr, - const unsigned int importance); - -void tipc_acknowledge(u32 port_ref, u32 ack); - -void tipc_port_destroy(struct tipc_port *p_ptr); - -int tipc_publish(struct tipc_port *p_ptr, unsigned int scope, - struct tipc_name_seq const *name_seq); - -int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope, - struct tipc_name_seq const *name_seq); - -int tipc_port_connect(u32 portref, struct tipc_portid const *port); - -int tipc_port_disconnect(u32 portref); - -int tipc_port_shutdown(u32 ref); - -/* - * The following routines require that the port be locked on entry - */ -int __tipc_port_disconnect(struct tipc_port *tp_ptr); -int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr, - struct tipc_portid const *peer); -int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg); - -struct sk_buff *tipc_port_get_ports(void); -void tipc_port_reinit(void); - -/** - * tipc_port_lock - lock port instance referred to and return its pointer - */ -static inline struct tipc_port *tipc_port_lock(u32 ref) -{ - return (struct tipc_port *)tipc_ref_lock(ref); -} - -/** - * tipc_port_unlock - unlock a port instance - * - * Can use pointer instead of tipc_ref_unlock() since port is already locked. 
- */ -static inline void tipc_port_unlock(struct tipc_port *p_ptr) -{ - spin_unlock_bh(p_ptr->lock); -} - -static inline u32 tipc_port_peernode(struct tipc_port *p_ptr) -{ - return msg_destnode(&p_ptr->phdr); -} - -static inline u32 tipc_port_peerport(struct tipc_port *p_ptr) -{ - return msg_destport(&p_ptr->phdr); -} - -static inline bool tipc_port_unreliable(struct tipc_port *port) -{ - return msg_src_droppable(&port->phdr) != 0; -} - -static inline void tipc_port_set_unreliable(struct tipc_port *port, - bool unreliable) -{ - msg_set_src_droppable(&port->phdr, unreliable ? 1 : 0); -} - -static inline bool tipc_port_unreturnable(struct tipc_port *port) -{ - return msg_dest_droppable(&port->phdr) != 0; -} - -static inline void tipc_port_set_unreturnable(struct tipc_port *port, - bool unreturnable) -{ - msg_set_dest_droppable(&port->phdr, unreturnable ? 1 : 0); -} - - -static inline int tipc_port_importance(struct tipc_port *port) -{ - return msg_importance(&port->phdr); -} - -static inline int tipc_port_set_importance(struct tipc_port *port, int imp) -{ - if (imp > TIPC_CRITICAL_IMPORTANCE) - return -EINVAL; - msg_set_importance(&port->phdr, (u32)imp); - return 0; -} - -#endif diff --git a/net/tipc/ref.c b/net/tipc/ref.c deleted file mode 100644 index 3d4ecd754eee..000000000000 --- a/net/tipc/ref.c +++ /dev/null @@ -1,266 +0,0 @@ -/* - * net/tipc/ref.c: TIPC object registry code - * - * Copyright (c) 1991-2006, Ericsson AB - * Copyright (c) 2004-2007, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "core.h" -#include "ref.h" - -/** - * struct reference - TIPC object reference entry - * @object: pointer to object associated with reference entry - * @lock: spinlock controlling access to object - * @ref: reference value for object (combines instance & array index info) - */ -struct reference { - void *object; - spinlock_t lock; - u32 ref; -}; - -/** - * struct tipc_ref_table - table of TIPC object reference entries - * @entries: pointer to array of reference entries - * @capacity: array index of first unusable entry - * @init_point: array index of first uninitialized entry - * @first_free: array index of first unused object reference entry - * @last_free: array index of last unused object reference entry - * @index_mask: bitmask for array index portion of reference values - * @start_mask: initial value for instance value portion of reference values - */ -struct ref_table { - struct reference *entries; - u32 capacity; - u32 init_point; - u32 first_free; - u32 last_free; - u32 index_mask; - u32 start_mask; -}; - -/* - * Object reference table consists of 2**N entries. - * - * State Object ptr Reference - * ----- ---------- --------- - * In use non-NULL XXXX|own index - * (XXXX changes each time entry is acquired) - * Free NULL YYYY|next free index - * (YYYY is one more than last used XXXX) - * Uninitialized NULL 0 - * - * Entry 0 is not used; this allows index 0 to denote the end of the free list. - * - * Note that a reference value of 0 does not necessarily indicate that an - * entry is uninitialized, since the last entry in the free list could also - * have a reference value of 0 (although this is unlikely). - */ - -static struct ref_table tipc_ref_table; - -static DEFINE_SPINLOCK(ref_table_lock); - -/** - * tipc_ref_table_init - create reference table for objects - */ -int tipc_ref_table_init(u32 requested_size, u32 start) -{ - struct reference *table; - u32 actual_size; - - /* account for unused entry, then round up size to a power of 2 */ - - requested_size++; - for (actual_size = 16; actual_size < requested_size; actual_size <<= 1) - /* do nothing */ ; - - /* allocate table & mark all entries as uninitialized */ - table = vzalloc(actual_size * sizeof(struct reference)); - if (table == NULL) - return -ENOMEM; - - tipc_ref_table.entries = table; - tipc_ref_table.capacity = requested_size; - tipc_ref_table.init_point = 1; - tipc_ref_table.first_free = 0; - tipc_ref_table.last_free = 0; - tipc_ref_table.index_mask = actual_size - 1; - tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask; - - return 0; -} - -/** - * tipc_ref_table_stop - destroy reference table for objects - */ -void tipc_ref_table_stop(void) -{ - vfree(tipc_ref_table.entries); - tipc_ref_table.entries = NULL; -} - -/** - * tipc_ref_acquire - create reference to an object - * - * Register an object pointer in reference table and lock the object. - * Returns a unique reference value that is used from then on to retrieve the - * object pointer, or to determine that the object has been deregistered. - * - * Note: The object is returned in the locked state so that the caller can - * register a partially initialized object, without running the risk that - * the object will be accessed before initialization is complete. - */ -u32 tipc_ref_acquire(void *object, spinlock_t **lock) -{ - u32 index; - u32 index_mask; - u32 next_plus_upper; - u32 ref; - struct reference *entry = NULL; - - if (!object) { - pr_err("Attempt to acquire ref. 
to non-existent obj\n"); - return 0; - } - if (!tipc_ref_table.entries) { - pr_err("Ref. table not found in acquisition attempt\n"); - return 0; - } - - /* take a free entry, if available; otherwise initialize a new entry */ - spin_lock_bh(&ref_table_lock); - if (tipc_ref_table.first_free) { - index = tipc_ref_table.first_free; - entry = &(tipc_ref_table.entries[index]); - index_mask = tipc_ref_table.index_mask; - next_plus_upper = entry->ref; - tipc_ref_table.first_free = next_plus_upper & index_mask; - ref = (next_plus_upper & ~index_mask) + index; - } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { - index = tipc_ref_table.init_point++; - entry = &(tipc_ref_table.entries[index]); - spin_lock_init(&entry->lock); - ref = tipc_ref_table.start_mask + index; - } else { - ref = 0; - } - spin_unlock_bh(&ref_table_lock); - - /* - * Grab the lock so no one else can modify this entry - * While we assign its ref value & object pointer - */ - if (entry) { - spin_lock_bh(&entry->lock); - entry->ref = ref; - entry->object = object; - *lock = &entry->lock; - /* - * keep it locked, the caller is responsible - * for unlocking this when they're done with it - */ - } - - return ref; -} - -/** - * tipc_ref_discard - invalidate references to an object - * - * Disallow future references to an object and free up the entry for re-use. - * Note: The entry's spin_lock may still be busy after discard - */ -void tipc_ref_discard(u32 ref) -{ - struct reference *entry; - u32 index; - u32 index_mask; - - if (!tipc_ref_table.entries) { - pr_err("Ref. table not found during discard attempt\n"); - return; - } - - index_mask = tipc_ref_table.index_mask; - index = ref & index_mask; - entry = &(tipc_ref_table.entries[index]); - - spin_lock_bh(&ref_table_lock); - - if (!entry->object) { - pr_err("Attempt to discard ref. to non-existent obj\n"); - goto exit; - } - if (entry->ref != ref) { - pr_err("Attempt to discard non-existent reference\n"); - goto exit; - } - - /* - * mark entry as unused; increment instance part of entry's reference - * to invalidate any subsequent references - */ - entry->object = NULL; - entry->ref = (ref & ~index_mask) + (index_mask + 1); - - /* append entry to free entry list */ - if (tipc_ref_table.first_free == 0) - tipc_ref_table.first_free = index; - else - tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index; - tipc_ref_table.last_free = index; - -exit: - spin_unlock_bh(&ref_table_lock); -} - -/** - * tipc_ref_lock - lock referenced object and return pointer to it - */ -void *tipc_ref_lock(u32 ref) -{ - if (likely(tipc_ref_table.entries)) { - struct reference *entry; - - entry = &tipc_ref_table.entries[ref & - tipc_ref_table.index_mask]; - if (likely(entry->ref != 0)) { - spin_lock_bh(&entry->lock); - if (likely((entry->ref == ref) && (entry->object))) - return entry->object; - spin_unlock_bh(&entry->lock); - } - } - return NULL; -} diff --git a/net/tipc/ref.h b/net/tipc/ref.h deleted file mode 100644 index d01aa1df63b8..000000000000 --- a/net/tipc/ref.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * net/tipc/ref.h: Include file for TIPC object registry code - * - * Copyright (c) 1991-2006, Ericsson AB - * Copyright (c) 2005-2006, Wind River Systems - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _TIPC_REF_H -#define _TIPC_REF_H - -int tipc_ref_table_init(u32 requested_size, u32 start); -void tipc_ref_table_stop(void); - -u32 tipc_ref_acquire(void *object, spinlock_t **lock); -void tipc_ref_discard(u32 ref); - -void *tipc_ref_lock(u32 ref); - -#endif diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ff8c8118d56e..75275c5cf929 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -35,17 +35,67 @@ */ #include "core.h" -#include "port.h" #include "name_table.h" #include "node.h" #include "link.h" #include <linux/export.h> +#include "config.h" +#include "socket.h" #define SS_LISTENING -1 /* socket is listening */ #define SS_READY -2 /* socket is connectionless */ -#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ -#define TIPC_FWD_MSG 1 +#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ +#define CONN_PROBING_INTERVAL 3600000 /* [ms] => 1 h */ +#define TIPC_FWD_MSG 1 +#define TIPC_CONN_OK 0 +#define TIPC_CONN_PROBING 1 + +/** + * struct tipc_sock - TIPC socket structure + * @sk: network socket state; interacts with the user via the socket API + * @connected: non-zero if socket is currently connected to a peer socket + * @conn_type: TIPC type used when connection was established + * @conn_instance: TIPC instance used when connection was established + * @published: non-zero if socket has one or more associated names + * @max_pkt: maximum packet size "hint" used when building messages sent by socket + * @ref: unique reference to socket in TIPC object registry + * @phdr: preformatted message header used when sending messages + * @sock_list: adjacent sockets in TIPC's global list of sockets + * @publications: list of publications for socket + * @pub_count: total # of publications socket has made during its lifetime + * @probing_state: state of the connection probing state machine + * @probing_interval: interval between connection probes [ms] + * @timer: timer driving the connection probing + * @conn_timeout: the time we can wait for an unresponded setup request + * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue + * @link_cong: non-zero if owner 
must sleep because of link congestion + * @sent_unacked: # messages sent by socket, and not yet acked by peer + * @rcv_unacked: # messages read by user, but not yet acked back to peer + */ +struct tipc_sock { + struct sock sk; + int connected; + u32 conn_type; + u32 conn_instance; + int published; + u32 max_pkt; + u32 ref; + struct tipc_msg phdr; + struct list_head sock_list; + struct list_head publications; + u32 pub_count; + u32 probing_state; + u32 probing_interval; + struct timer_list timer; + uint conn_timeout; + atomic_t dupl_rcvcnt; + bool link_cong; + uint sent_unacked; + uint rcv_unacked; +}; static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); static void tipc_data_ready(struct sock *sk); @@ -53,6 +103,16 @@ static void tipc_write_space(struct sock *sk); static int tipc_release(struct socket *sock); static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); +static void tipc_sk_timeout(unsigned long ref); +static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, + struct tipc_name_seq const *seq); +static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, + struct tipc_name_seq const *seq); +static u32 tipc_sk_ref_acquire(struct tipc_sock *tsk); +static void tipc_sk_ref_discard(u32 ref); +static struct tipc_sock *tipc_sk_get(u32 ref); +static struct tipc_sock *tipc_sk_get_next(u32 *ref); +static void tipc_sk_put(struct tipc_sock *tsk); static const struct proto_ops packet_ops; static const struct proto_ops stream_ops; @@ -106,24 +166,75 @@ static struct proto tipc_proto_kern; * - port reference */ -#include "socket.h" +static u32 tsk_peer_node(struct tipc_sock *tsk) +{ + return msg_destnode(&tsk->phdr); +} + +static u32 tsk_peer_port(struct tipc_sock *tsk) +{ + return msg_destport(&tsk->phdr); +} + +static bool tsk_unreliable(struct tipc_sock *tsk) +{ + return msg_src_droppable(&tsk->phdr) != 0; +} + +static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable) +{ + msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0); +} + +static bool tsk_unreturnable(struct tipc_sock *tsk) +{ + return msg_dest_droppable(&tsk->phdr) != 0; +} + +static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable) +{ + msg_set_dest_droppable(&tsk->phdr, unreturnable ? 
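[Editor's note, not part of the patch] The tsk_set_*() helpers around this point do not keep separate flags in the socket; they patch the preformatted header tsk->phdr once, so every message built from it inherits the setting with no per-send work. tipc_sk_create(), further down in this diff, uses exactly that to mark connectionless sockets at creation time:

	if (sock->state == SS_READY) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}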
1 : 0); +} + +static int tsk_importance(struct tipc_sock *tsk) +{ + return msg_importance(&tsk->phdr); +} + +static int tsk_set_importance(struct tipc_sock *tsk, int imp) +{ + if (imp > TIPC_CRITICAL_IMPORTANCE) + return -EINVAL; + msg_set_importance(&tsk->phdr, (u32)imp); + return 0; +} + +static struct tipc_sock *tipc_sk(const struct sock *sk) +{ + return container_of(sk, struct tipc_sock, sk); +} + +static int tsk_conn_cong(struct tipc_sock *tsk) +{ + return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN; +} /** - * advance_rx_queue - discard first buffer in socket receive queue + * tsk_advance_rx_queue - discard first buffer in socket receive queue * * Caller must hold socket lock */ -static void advance_rx_queue(struct sock *sk) +static void tsk_advance_rx_queue(struct sock *sk) { kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); } /** - * reject_rx_queue - reject all buffers in socket receive queue + * tsk_rej_rx_queue - reject all buffers in socket receive queue * * Caller must hold socket lock */ -static void reject_rx_queue(struct sock *sk) +static void tsk_rej_rx_queue(struct sock *sk) { struct sk_buff *buf; u32 dnode; @@ -134,6 +245,38 @@ static void reject_rx_queue(struct sock *sk) } } +/* tsk_peer_msg - verify if message was sent by connected port's peer + * + * Handles cases where the node's network address has changed from + * the default of <0.0.0> to its configured setting. + */ +static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) +{ + u32 peer_port = tsk_peer_port(tsk); + u32 orig_node; + u32 peer_node; + + if (unlikely(!tsk->connected)) + return false; + + if (unlikely(msg_origport(msg) != peer_port)) + return false; + + orig_node = msg_orignode(msg); + peer_node = tsk_peer_node(tsk); + + if (likely(orig_node == peer_node)) + return true; + + if (!orig_node && (peer_node == tipc_own_addr)) + return true; + + if (!peer_node && (orig_node == tipc_own_addr)) + return true; + + return false; +} + /** * tipc_sk_create - create a TIPC socket * @net: network namespace (must be default network) @@ -153,7 +296,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, socket_state state; struct sock *sk; struct tipc_sock *tsk; - struct tipc_port *port; + struct tipc_msg *msg; u32 ref; /* Validate arguments */ @@ -188,20 +331,24 @@ static int tipc_sk_create(struct net *net, struct socket *sock, return -ENOMEM; tsk = tipc_sk(sk); - port = &tsk->port; - - ref = tipc_port_init(port, TIPC_LOW_IMPORTANCE); + ref = tipc_sk_ref_acquire(tsk); if (!ref) { - pr_warn("Socket registration failed, ref. 
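[Editor's sketch, not part of the patch] tipc_sk() above is the standard container_of() idiom: because struct sock is the first member of struct tipc_sock, the generic socket pointer handed around by the core can be converted back to the TIPC wrapper by pure pointer arithmetic, with no registry lookup. A stand-alone rendering with hypothetical names:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sock_demo { int refcnt; };

struct tipc_sock_demo {
	struct sock_demo sk;	/* kept as the first member */
	unsigned int ref;
};

static struct tipc_sock_demo *tipc_sk_demo(const struct sock_demo *sk)
{
	return container_of(sk, struct tipc_sock_demo, sk);
}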
table exhausted\n"); - sk_free(sk); + pr_warn("Socket create failed; reference table exhausted\n"); return -ENOMEM; } + tsk->max_pkt = MAX_PKT_DEFAULT; + tsk->ref = ref; + INIT_LIST_HEAD(&tsk->publications); + msg = &tsk->phdr; + tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, + NAMED_H_SIZE, 0); + msg_set_origport(msg, ref); /* Finish initializing socket data structures */ sock->ops = ops; sock->state = state; - sock_init_data(sock, sk); + k_init_timer(&tsk->timer, (Handler)tipc_sk_timeout, ref); sk->sk_backlog_rcv = tipc_backlog_rcv; sk->sk_rcvbuf = sysctl_tipc_rmem[1]; sk->sk_data_ready = tipc_data_ready; @@ -209,12 +356,11 @@ static int tipc_sk_create(struct net *net, struct socket *sock, tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; tsk->sent_unacked = 0; atomic_set(&tsk->dupl_rcvcnt, 0); - tipc_port_unlock(port); if (sock->state == SS_READY) { - tipc_port_set_unreturnable(port, true); + tsk_set_unreturnable(tsk, true); if (sock->type == SOCK_DGRAM) - tipc_port_set_unreliable(port, true); + tsk_set_unreliable(tsk, true); } return 0; } @@ -308,7 +454,6 @@ static int tipc_release(struct socket *sock) { struct sock *sk = sock->sk; struct tipc_sock *tsk; - struct tipc_port *port; struct sk_buff *buf; u32 dnode; @@ -320,13 +465,13 @@ static int tipc_release(struct socket *sock) return 0; tsk = tipc_sk(sk); - port = &tsk->port; lock_sock(sk); /* * Reject all unreceived messages, except on an active connection * (which disconnects locally & sends a 'FIN+' to peer) */ + dnode = tsk_peer_node(tsk); while (sock->state != SS_DISCONNECTING) { buf = __skb_dequeue(&sk->sk_receive_queue); if (buf == NULL) @@ -337,17 +482,27 @@ static int tipc_release(struct socket *sock) if ((sock->state == SS_CONNECTING) || (sock->state == SS_CONNECTED)) { sock->state = SS_DISCONNECTING; - tipc_port_disconnect(port->ref); + tsk->connected = 0; + tipc_node_remove_conn(dnode, tsk->ref); } if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) tipc_link_xmit(buf, dnode, 0); } } - /* Destroy TIPC port; also disconnects an active connection and - * sends a 'FIN-' to peer. - */ - tipc_port_destroy(port); + tipc_sk_withdraw(tsk, 0, NULL); + tipc_sk_ref_discard(tsk->ref); + k_cancel_timer(&tsk->timer); + if (tsk->connected) { + buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + SHORT_H_SIZE, 0, dnode, tipc_own_addr, + tsk_peer_port(tsk), + tsk->ref, TIPC_ERR_NO_PORT); + if (buf) + tipc_link_xmit(buf, dnode, tsk->ref); + tipc_node_remove_conn(dnode, tsk->ref); + } + k_term_timer(&tsk->timer); /* Discard any remaining (connection-based) messages in receive queue */ __skb_queue_purge(&sk->sk_receive_queue); @@ -355,7 +510,6 @@ static int tipc_release(struct socket *sock) /* Reject any messages that accumulated in backlog queue */ sock->state = SS_DISCONNECTING; release_sock(sk); - sock_put(sk); sock->sk = NULL; @@ -387,7 +541,7 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, lock_sock(sk); if (unlikely(!uaddr_len)) { - res = tipc_withdraw(&tsk->port, 0, NULL); + res = tipc_sk_withdraw(tsk, 0, NULL); goto exit; } @@ -415,8 +569,8 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, } res = (addr->scope > 0) ? 
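[Editor's gloss, not part of the patch] tipc_bind() encodes the operation in the sign of the scope: a positive scope publishes the name, a negative one withdraws it, and a zero-length address (handled earlier in the function) withdraws everything the socket has published. Schematically, with hypothetical stand-ins:

static int publish_demo(int scope)  { (void)scope; return 0; }
static int withdraw_demo(int scope) { (void)scope; return 0; }

static int bind_demo(int scope)
{
	return (scope > 0) ? publish_demo(scope) : withdraw_demo(-scope);
}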
- tipc_publish(&tsk->port, addr->scope, &addr->addr.nameseq) : - tipc_withdraw(&tsk->port, -addr->scope, &addr->addr.nameseq); + tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) : + tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq); exit: release_sock(sk); return res; @@ -446,10 +600,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr, if ((sock->state != SS_CONNECTED) && ((peer != 2) || (sock->state != SS_DISCONNECTING))) return -ENOTCONN; - addr->addr.id.ref = tipc_port_peerport(&tsk->port); - addr->addr.id.node = tipc_port_peernode(&tsk->port); + addr->addr.id.ref = tsk_peer_port(tsk); + addr->addr.id.node = tsk_peer_node(tsk); } else { - addr->addr.id.ref = tsk->port.ref; + addr->addr.id.ref = tsk->ref; addr->addr.id.node = tipc_own_addr; } @@ -518,7 +672,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, break; case SS_READY: case SS_CONNECTED: - if (!tsk->link_cong && !tipc_sk_conn_cong(tsk)) + if (!tsk->link_cong && !tsk_conn_cong(tsk)) mask |= POLLOUT; /* fall thru' */ case SS_CONNECTING: @@ -549,7 +703,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, struct iovec *iov, size_t dsz, long timeo) { struct sock *sk = sock->sk; - struct tipc_msg *mhdr = &tipc_sk(sk)->port.phdr; + struct tipc_msg *mhdr = &tipc_sk(sk)->phdr; struct sk_buff *buf; uint mtu; int rc; @@ -579,6 +733,7 @@ new_mtu: goto new_mtu; if (rc != -ELINKCONG) break; + tipc_sk(sk)->link_cong = 1; rc = tipc_wait_for_sndmsg(sock, &timeo); if (rc) kfree_skb_list(buf); @@ -638,20 +793,19 @@ static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode, struct sk_buff *buf) { struct tipc_msg *msg = buf_msg(buf); - struct tipc_port *port = &tsk->port; int conn_cong; /* Ignore if connection cannot be validated: */ - if (!port->connected || !tipc_port_peer_msg(port, msg)) + if (!tsk_peer_msg(tsk, msg)) goto exit; - port->probing_state = TIPC_CONN_OK; + tsk->probing_state = TIPC_CONN_OK; if (msg_type(msg) == CONN_ACK) { - conn_cong = tipc_sk_conn_cong(tsk); + conn_cong = tsk_conn_cong(tsk); tsk->sent_unacked -= msg_msgcnt(msg); if (conn_cong) - tipc_sock_wakeup(tsk); + tsk->sk.sk_write_space(&tsk->sk); } else if (msg_type(msg) == CONN_PROBE) { if (!tipc_msg_reverse(buf, dnode, TIPC_OK)) return TIPC_OK; @@ -742,8 +896,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; - struct tipc_msg *mhdr = &port->phdr; + struct tipc_msg *mhdr = &tsk->phdr; struct iovec *iov = m->msg_iov; u32 dnode, dport; struct sk_buff *buf; @@ -774,13 +927,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, rc = -EISCONN; goto exit; } - if (tsk->port.published) { + if (tsk->published) { rc = -EOPNOTSUPP; goto exit; } if (dest->addrtype == TIPC_ADDR_NAME) { - tsk->port.conn_type = dest->addr.name.name.type; - tsk->port.conn_instance = dest->addr.name.name.instance; + tsk->conn_type = dest->addr.name.name.type; + tsk->conn_instance = dest->addr.name.name.instance; } } rc = dest_name_check(dest, m); @@ -820,13 +973,14 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock, } new_mtu: - mtu = tipc_node_get_mtu(dnode, tsk->port.ref); + mtu = tipc_node_get_mtu(dnode, tsk->ref); rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); if (rc < 0) goto exit; do { - rc = tipc_link_xmit(buf, dnode, tsk->port.ref); + TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong; + rc = 
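[Editor's sketch, not part of the patch] The send loops in this file fold two retry strategies into one: -EMSGSIZE means the path MTU changed, so the message is rebuilt and sent again, while -ELINKCONG marks the socket congested (tsk->link_cong = 1) and blocks until the link layer posts a SOCK_WAKEUP message that clears the flag (see filter_rcv() further down). The control flow, reduced to stand-alone C with stub primitives:

#include <errno.h>

#define ELINKCONG 413	/* TIPC-internal errno; stand-in value for this sketch */

static int link_xmit(void)          { return 0; }	/* 0, -EMSGSIZE or -ELINKCONG */
static int rebuild_at_new_mtu(void) { return 0; }	/* re-fragment for smaller MTU */
static int wait_for_wakeup(void)    { return 0; }	/* sleep until SOCK_WAKEUP */

static int send_demo(void)
{
	int rc;

	for (;;) {
		rc = link_xmit();
		if (rc >= 0)
			return rc;		/* sent */
		if (rc == -EMSGSIZE) {		/* MTU shrank: refragment */
			rc = rebuild_at_new_mtu();
			if (rc)
				return rc;
			continue;
		}
		if (rc != -ELINKCONG)
			return rc;		/* hard error */
		rc = wait_for_wakeup();		/* congested: block */
		if (rc)
			return rc;
	}
}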
tipc_link_xmit(buf, dnode, tsk->ref); if (likely(rc >= 0)) { if (sock->state != SS_READY) sock->state = SS_CONNECTING; @@ -835,10 +989,9 @@ new_mtu: } if (rc == -EMSGSIZE) goto new_mtu; - if (rc != -ELINKCONG) break; - + tsk->link_cong = 1; rc = tipc_wait_for_sndmsg(sock, &timeo); if (rc) kfree_skb_list(buf); @@ -873,8 +1026,8 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p) prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); done = sk_wait_event(sk, timeo_p, (!tsk->link_cong && - !tipc_sk_conn_cong(tsk)) || - !tsk->port.connected); + !tsk_conn_cong(tsk)) || + !tsk->connected); finish_wait(sk_sleep(sk), &wait); } while (!done); return 0; @@ -897,11 +1050,10 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; - struct tipc_msg *mhdr = &port->phdr; + struct tipc_msg *mhdr = &tsk->phdr; struct sk_buff *buf; DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); - u32 ref = port->ref; + u32 ref = tsk->ref; int rc = -EINVAL; long timeo; u32 dnode; @@ -929,16 +1081,16 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock, } timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); - dnode = tipc_port_peernode(port); + dnode = tsk_peer_node(tsk); next: - mtu = port->max_pkt; + mtu = tsk->max_pkt; send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf); if (unlikely(rc < 0)) goto exit; do { - if (likely(!tipc_sk_conn_cong(tsk))) { + if (likely(!tsk_conn_cong(tsk))) { rc = tipc_link_xmit(buf, dnode, ref); if (likely(!rc)) { tsk->sent_unacked++; @@ -948,11 +1100,12 @@ next: goto next; } if (rc == -EMSGSIZE) { - port->max_pkt = tipc_node_get_mtu(dnode, ref); + tsk->max_pkt = tipc_node_get_mtu(dnode, ref); goto next; } if (rc != -ELINKCONG) break; + tsk->link_cong = 1; } rc = tipc_wait_for_sndpkt(sock, &timeo); if (rc) @@ -984,29 +1137,25 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock, return tipc_send_stream(iocb, sock, m, dsz); } -/** - * auto_connect - complete connection setup to a remote port - * @tsk: tipc socket structure - * @msg: peer's response message - * - * Returns 0 on success, errno otherwise +/* tipc_sk_finish_conn - complete the setup of a connection */ -static int auto_connect(struct tipc_sock *tsk, struct tipc_msg *msg) +static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, + u32 peer_node) { - struct tipc_port *port = &tsk->port; - struct socket *sock = tsk->sk.sk_socket; - struct tipc_portid peer; - - peer.ref = msg_origport(msg); - peer.node = msg_orignode(msg); - - __tipc_port_connect(port->ref, port, &peer); - - if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE) - return -EINVAL; - msg_set_importance(&port->phdr, (u32)msg_importance(msg)); - sock->state = SS_CONNECTED; - return 0; + struct tipc_msg *msg = &tsk->phdr; + + msg_set_destnode(msg, peer_node); + msg_set_destport(msg, peer_port); + msg_set_type(msg, TIPC_CONN_MSG); + msg_set_lookup_scope(msg, 0); + msg_set_hdr_sz(msg, SHORT_H_SIZE); + + tsk->probing_interval = CONN_PROBING_INTERVAL; + tsk->probing_state = TIPC_CONN_OK; + tsk->connected = 1; + k_start_timer(&tsk->timer, tsk->probing_interval); + tipc_node_add_conn(peer_node, tsk->ref, peer_port); + tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->ref); } /** @@ -1033,17 +1182,17 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) } /** - * anc_data_recv - optionally capture 
ancillary data for received message + * tipc_sk_anc_data_recv - optionally capture ancillary data for received message * @m: descriptor for message info * @msg: received message header - * @tport: TIPC port associated with message + * @tsk: TIPC port associated with message * * Note: Ancillary data is not captured if not requested by receiver. * * Returns 0 if successful, otherwise errno */ -static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, - struct tipc_port *tport) +static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg, + struct tipc_sock *tsk) { u32 anc_data[3]; u32 err; @@ -1086,10 +1235,10 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, anc_data[2] = msg_nameupper(msg); break; case TIPC_CONN_MSG: - has_name = (tport->conn_type != 0); - anc_data[0] = tport->conn_type; - anc_data[1] = tport->conn_instance; - anc_data[2] = tport->conn_instance; + has_name = (tsk->conn_type != 0); + anc_data[0] = tsk->conn_type; + anc_data[1] = tsk->conn_instance; + anc_data[2] = tsk->conn_instance; break; default: has_name = 0; @@ -1103,6 +1252,24 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, return 0; } +static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) +{ + struct sk_buff *buf = NULL; + struct tipc_msg *msg; + u32 peer_port = tsk_peer_port(tsk); + u32 dnode = tsk_peer_node(tsk); + + if (!tsk->connected) + return; + buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, + tipc_own_addr, peer_port, tsk->ref, TIPC_OK); + if (!buf) + return; + msg = buf_msg(buf); + msg_set_msgcnt(msg, ack); + tipc_link_xmit(buf, dnode, msg_link_selector(msg)); +} + static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) { struct sock *sk = sock->sk; @@ -1153,7 +1320,6 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; struct sk_buff *buf; struct tipc_msg *msg; long timeo; @@ -1188,7 +1354,7 @@ restart: /* Discard an empty non-errored message & try again */ if ((!sz) && (!err)) { - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); goto restart; } @@ -1196,7 +1362,7 @@ restart: set_orig_addr(m, msg); /* Capture ancillary data (optional) */ - res = anc_data_recv(m, msg, port); + res = tipc_sk_anc_data_recv(m, msg, tsk); if (res) goto exit; @@ -1223,10 +1389,10 @@ restart: if (likely(!(flags & MSG_PEEK))) { if ((sock->state != SS_READY) && (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { - tipc_acknowledge(port->ref, tsk->rcv_unacked); + tipc_sk_send_ack(tsk, tsk->rcv_unacked); tsk->rcv_unacked = 0; } - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); } exit: release_sock(sk); @@ -1250,7 +1416,6 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; struct sk_buff *buf; struct tipc_msg *msg; long timeo; @@ -1288,14 +1453,14 @@ restart: /* Discard an empty non-errored message & try again */ if ((!sz) && (!err)) { - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); goto restart; } /* Optionally capture sender's address & ancillary data of first msg */ if (sz_copied == 0) { set_orig_addr(m, msg); - res = anc_data_recv(m, msg, port); + res = tipc_sk_anc_data_recv(m, msg, tsk); if (res) goto exit; } @@ -1334,10 +1499,10 @@ restart: /* Consume received message (optional) */ if (likely(!(flags & MSG_PEEK))) { if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { - 
tipc_acknowledge(port->ref, tsk->rcv_unacked); + tipc_sk_send_ack(tsk, tsk->rcv_unacked); tsk->rcv_unacked = 0; } - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); } /* Loop around if more data is required */ @@ -1396,12 +1561,9 @@ static void tipc_data_ready(struct sock *sk) static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) { struct sock *sk = &tsk->sk; - struct tipc_port *port = &tsk->port; struct socket *sock = sk->sk_socket; struct tipc_msg *msg = buf_msg(*buf); - int retval = -TIPC_ERR_NO_PORT; - int res; if (msg_mcast(msg)) return retval; @@ -1409,16 +1571,23 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) switch ((int)sock->state) { case SS_CONNECTED: /* Accept only connection-based messages sent by peer */ - if (msg_connected(msg) && tipc_port_peer_msg(port, msg)) { + if (tsk_peer_msg(tsk, msg)) { if (unlikely(msg_errcode(msg))) { sock->state = SS_DISCONNECTING; - __tipc_port_disconnect(port); + tsk->connected = 0; + /* let timer expire on its own */ + tipc_node_remove_conn(tsk_peer_node(tsk), + tsk->ref); } retval = TIPC_OK; } break; case SS_CONNECTING: /* Accept only ACK or NACK message */ + + if (unlikely(!msg_connected(msg))) + break; + if (unlikely(msg_errcode(msg))) { sock->state = SS_DISCONNECTING; sk->sk_err = ECONNREFUSED; @@ -1426,17 +1595,17 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) break; } - if (unlikely(!msg_connected(msg))) - break; - - res = auto_connect(tsk, msg); - if (res) { + if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) { sock->state = SS_DISCONNECTING; - sk->sk_err = -res; + sk->sk_err = EINVAL; retval = TIPC_OK; break; } + tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg)); + msg_set_importance(&tsk->phdr, msg_importance(msg)); + sock->state = SS_CONNECTED; + /* If an incoming message is an 'ACK-', it should be * discarded here because it doesn't contain useful * data.
In addition, we should try to wake up @@ -1518,6 +1687,13 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf) if (unlikely(msg_user(msg) == CONN_MANAGER)) return tipc_sk_proto_rcv(tsk, &onode, buf); + if (unlikely(msg_user(msg) == SOCK_WAKEUP)) { + kfree_skb(buf); + tsk->link_cong = 0; + sk->sk_write_space(sk); + return TIPC_OK; + } + /* Reject message if it is wrong sort of message for socket */ if (msg_type(msg) > TIPC_DIRECT_MSG) return -TIPC_ERR_NO_PORT; @@ -1585,7 +1761,6 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf) int tipc_sk_rcv(struct sk_buff *buf) { struct tipc_sock *tsk; - struct tipc_port *port; struct sock *sk; u32 dport = msg_destport(buf_msg(buf)); int rc = TIPC_OK; uint limit; u32 dnode; /* Validate destination and message */ - port = tipc_port_lock(dport); - if (unlikely(!port)) { + tsk = tipc_sk_get(dport); + if (unlikely(!tsk)) { rc = tipc_msg_eval(buf, &dnode); goto exit; } - - tsk = tipc_port_to_sock(port); sk = &tsk->sk; /* Queue message */ @@ -1615,8 +1788,7 @@ int tipc_sk_rcv(struct sk_buff *buf) rc = -TIPC_ERR_OVERLOAD; } bh_unlock_sock(sk); - tipc_port_unlock(port); - + tipc_sk_put(tsk); if (likely(!rc)) return 0; exit: @@ -1803,10 +1975,8 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) { struct sock *new_sk, *sk = sock->sk; struct sk_buff *buf; - struct tipc_port *new_port; + struct tipc_sock *new_tsock; struct tipc_msg *msg; - struct tipc_portid peer; - u32 new_ref; long timeo; int res; @@ -1828,8 +1998,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) goto exit; new_sk = new_sock->sk; - new_port = &tipc_sk(new_sk)->port; - new_ref = new_port->ref; + new_tsock = tipc_sk(new_sk); msg = buf_msg(buf); /* we lock on new_sk; but lockdep sees the lock on sk */ @@ -1839,18 +2008,16 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) * Reject any stray messages received by new socket * before the socket lock was taken (very, very unlikely) */ - reject_rx_queue(new_sk); + tsk_rej_rx_queue(new_sk); /* Connect new socket to its peer */ - peer.ref = msg_origport(msg); - peer.node = msg_orignode(msg); - tipc_port_connect(new_ref, &peer); + tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg)); new_sock->state = SS_CONNECTED; - tipc_port_set_importance(new_port, msg_importance(msg)); + tsk_set_importance(new_tsock, msg_importance(msg)); if (msg_named(msg)) { - new_port->conn_type = msg_nametype(msg); - new_port->conn_instance = msg_nameinst(msg); + new_tsock->conn_type = msg_nametype(msg); + new_tsock->conn_instance = msg_nameinst(msg); } /* @@ -1860,7 +2027,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags) if (!msg_data_sz(msg)) { struct msghdr m = {NULL,}; - advance_rx_queue(sk); + tsk_advance_rx_queue(sk); tipc_send_packet(NULL, new_sock, &m, 0); } else { __skb_dequeue(&sk->sk_receive_queue);
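The SOCK_WAKEUP branch added to filter_rcv() above is the other half of the tsk->link_cong = 1 assignments in the send paths: a sender that gets -ELINKCONG marks the socket congested and sleeps in tipc_wait_for_sndmsg()/tipc_wait_for_sndpkt(), and the link layer later injects a SOCK_WAKEUP message that clears the flag and wakes the writers via sk_write_space(). A minimal userspace sketch of that handshake follows (not kernel code; all names are hypothetical, and a condition variable stands in for sk_write_space()):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t writable = PTHREAD_COND_INITIALIZER;
static bool link_cong; /* mirrors tsk->link_cong */

/* Send path: on congestion, set the flag and wait for a wakeup. */
static void wait_for_sndmsg(void)
{
	pthread_mutex_lock(&lock);
	link_cong = true;
	while (link_cong)
		pthread_cond_wait(&writable, &lock);
	pthread_mutex_unlock(&lock);
}

/* Receive path: a "wakeup" event clears the flag and wakes all writers. */
static void sock_wakeup(void)
{
	pthread_mutex_lock(&lock);
	link_cong = false;
	pthread_cond_broadcast(&writable);
	pthread_mutex_unlock(&lock);
}

Clearing the flag and signalling under the same lock keeps the wait loop's predicate race-free, which is the ordering the kernel code gets from the socket lock.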
@@ -1886,9 +2053,8 @@ static int tipc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; struct sk_buff *buf; - u32 peer; + u32 dnode; int res; if (how != SHUT_RDWR) @@ -1908,15 +2074,21 @@ restart: kfree_skb(buf); goto restart; } - tipc_port_disconnect(port->ref); - if (tipc_msg_reverse(buf, &peer, TIPC_CONN_SHUTDOWN)) - tipc_link_xmit(buf, peer, 0); + if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN)) + tipc_link_xmit(buf, dnode, tsk->ref); + tipc_node_remove_conn(dnode, tsk->ref); } else { - tipc_port_shutdown(port->ref); + dnode = tsk_peer_node(tsk); + buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, + TIPC_CONN_MSG, SHORT_H_SIZE, + 0, dnode, tipc_own_addr, + tsk_peer_port(tsk), + tsk->ref, TIPC_CONN_SHUTDOWN); + tipc_link_xmit(buf, dnode, tsk->ref); } - + tsk->connected = 0; sock->state = SS_DISCONNECTING; - + tipc_node_remove_conn(dnode, tsk->ref); /* fall through */ case SS_DISCONNECTING: @@ -1937,6 +2109,432 @@ restart: return res; } +static void tipc_sk_timeout(unsigned long ref) +{ + struct tipc_sock *tsk; + struct sock *sk; + struct sk_buff *buf = NULL; + u32 peer_port, peer_node; + + tsk = tipc_sk_get(ref); + if (!tsk) + return; + + sk = &tsk->sk; + bh_lock_sock(sk); + if (!tsk->connected) { + bh_unlock_sock(sk); + goto exit; + } + peer_port = tsk_peer_port(tsk); + peer_node = tsk_peer_node(tsk); + + if (tsk->probing_state == TIPC_CONN_PROBING) { + /* Previous probe not answered -> self abort */ + buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, + SHORT_H_SIZE, 0, tipc_own_addr, + peer_node, ref, peer_port, + TIPC_ERR_NO_PORT); + } else { + buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, + 0, peer_node, tipc_own_addr, + peer_port, ref, TIPC_OK); + tsk->probing_state = TIPC_CONN_PROBING; + k_start_timer(&tsk->timer, tsk->probing_interval); + } + bh_unlock_sock(sk); + if (buf) + tipc_link_xmit(buf, peer_node, ref); +exit: + tipc_sk_put(tsk); +} + +static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, + struct tipc_name_seq const *seq) +{ + struct publication *publ; + u32 key; + + if (tsk->connected) + return -EINVAL; + key = tsk->ref + tsk->pub_count + 1; + if (key == tsk->ref) + return -EADDRINUSE; + + publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, + scope, tsk->ref, key); + if (unlikely(!publ)) + return -EINVAL; + + list_add(&publ->pport_list, &tsk->publications); + tsk->pub_count++; + tsk->published = 1; + return 0; +} + +static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, + struct tipc_name_seq const *seq) +{ + struct publication *publ; + struct publication *safe; + int rc = -EINVAL; + + list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) { + if (seq) { + if (publ->scope != scope) + continue; + if (publ->type != seq->type) + continue; + if (publ->lower != seq->lower) + continue; + if (publ->upper != seq->upper) + break; + tipc_nametbl_withdraw(publ->type, publ->lower, + publ->ref, publ->key); + rc = 0; + break; + } + tipc_nametbl_withdraw(publ->type, publ->lower, + publ->ref, publ->key); + rc = 0; + } + if (list_empty(&tsk->publications)) + tsk->published = 0; + return rc; +}
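tipc_sk_publish() above derives each publication key from the socket reference plus a running count and rejects the one value that would collide with the reference itself after the counter wraps, while tipc_sk_withdraw() either removes a single matching binding or, when no sequence is given, withdraws them all. A sketch of the same bookkeeping on a plain linked list (not kernel code; all names are hypothetical):

#include <stdlib.h>

struct publication {
	unsigned int type, lower, upper, key;
	struct publication *next;
};

struct sock_state {
	unsigned int ref;       /* socket reference value */
	unsigned int pub_count; /* publications issued so far */
	struct publication *pubs;
};

static int publish(struct sock_state *s, unsigned int type,
		   unsigned int lower, unsigned int upper)
{
	unsigned int key = s->ref + s->pub_count + 1;
	struct publication *p;

	if (key == s->ref) /* counter wrapped all the way around */
		return -1;
	p = malloc(sizeof(*p));
	if (!p)
		return -1;
	p->type = type;
	p->lower = lower;
	p->upper = upper;
	p->key = key;
	p->next = s->pubs; /* equivalent of list_add() to the socket's list */
	s->pubs = p;
	s->pub_count++;
	return 0;
}

static int withdraw(struct sock_state *s, unsigned int type,
		    unsigned int lower, unsigned int upper)
{
	struct publication **pp = &s->pubs;

	while (*pp) {
		struct publication *p = *pp;

		if (p->type == type && p->lower == lower && p->upper == upper) {
			*pp = p->next; /* unlink and free the matching binding */
			free(p);
			return 0;
		}
		pp = &p->next;
	}
	return -1; /* nothing matched */
}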
+ +static int tipc_sk_show(struct tipc_sock *tsk, char *buf, + int len, int full_id) +{ + struct publication *publ; + int ret; + + if (full_id) + ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:", + tipc_zone(tipc_own_addr), + tipc_cluster(tipc_own_addr), + tipc_node(tipc_own_addr), tsk->ref); + else + ret = tipc_snprintf(buf, len, "%-10u:", tsk->ref); + + if (tsk->connected) { + u32 dport = tsk_peer_port(tsk); + u32 destnode = tsk_peer_node(tsk); + + ret += tipc_snprintf(buf + ret, len - ret, + " connected to <%u.%u.%u:%u>", + tipc_zone(destnode), + tipc_cluster(destnode), + tipc_node(destnode), dport); + if (tsk->conn_type != 0) + ret += tipc_snprintf(buf + ret, len - ret, + " via {%u,%u}", tsk->conn_type, + tsk->conn_instance); + } else if (tsk->published) { + ret += tipc_snprintf(buf + ret, len - ret, " bound to"); + list_for_each_entry(publ, &tsk->publications, pport_list) { + if (publ->lower == publ->upper) + ret += tipc_snprintf(buf + ret, len - ret, + " {%u,%u}", publ->type, + publ->lower); + else + ret += tipc_snprintf(buf + ret, len - ret, + " {%u,%u,%u}", publ->type, + publ->lower, publ->upper); + } + } + ret += tipc_snprintf(buf + ret, len - ret, "\n"); + return ret; +} + +struct sk_buff *tipc_sk_socks_show(void) +{ + struct sk_buff *buf; + struct tlv_desc *rep_tlv; + char *pb; + int pb_len; + struct tipc_sock *tsk; + int str_len = 0; + u32 ref = 0; + + buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN)); + if (!buf) + return NULL; + rep_tlv = (struct tlv_desc *)buf->data; + pb = TLV_DATA(rep_tlv); + pb_len = ULTRA_STRING_MAX_LEN; + + tsk = tipc_sk_get_next(&ref); + for (; tsk; tsk = tipc_sk_get_next(&ref)) { + lock_sock(&tsk->sk); + str_len += tipc_sk_show(tsk, pb + str_len, + pb_len - str_len, 0); + release_sock(&tsk->sk); + tipc_sk_put(tsk); + } + str_len += 1; /* for "\0" */ + skb_put(buf, TLV_SPACE(str_len)); + TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); + + return buf; +} + +/* tipc_sk_reinit: set non-zero address in all existing sockets + * when we go from standalone to network mode. + */ +void tipc_sk_reinit(void) +{ + struct tipc_msg *msg; + u32 ref = 0; + struct tipc_sock *tsk = tipc_sk_get_next(&ref); + + for (; tsk; tsk = tipc_sk_get_next(&ref)) { + lock_sock(&tsk->sk); + msg = &tsk->phdr; + msg_set_prevnode(msg, tipc_own_addr); + msg_set_orignode(msg, tipc_own_addr); + release_sock(&tsk->sk); + tipc_sk_put(tsk); + } +} + +/** + * struct reference - TIPC socket reference entry + * @tsk: pointer to socket associated with reference entry + * @ref: reference value for socket (combines instance & array index info) + */ +struct reference { + struct tipc_sock *tsk; + u32 ref; +}; + +/** + * struct ref_table - table of TIPC socket reference entries + * @entries: pointer to array of reference entries + * @capacity: array index of first unusable entry + * @init_point: array index of first uninitialized entry + * @first_free: array index of first unused socket reference entry + * @last_free: array index of last unused socket reference entry + * @index_mask: bitmask for array index portion of reference values + * @start_mask: initial value for instance value portion of reference values + */ +struct ref_table { + struct reference *entries; + u32 capacity; + u32 init_point; + u32 first_free; + u32 last_free; + u32 index_mask; + u32 start_mask; +}; + +/* Socket reference table consists of 2**N entries. + * + * State Socket ptr Reference + * ----- ---------- --------- + * In use non-NULL XXXX|own index + * (XXXX changes each time entry is acquired) + * Free NULL YYYY|next free index + * (YYYY is one more than last used XXXX) + * Uninitialized NULL 0 + * + * Entry 0 is not used; this allows index 0 to denote the end of the free list. + * + * Note that a reference value of 0 does not necessarily indicate that an + * entry is uninitialized, since the last entry in the free list could also + * have a reference value of 0 (although this is unlikely). + */ + +static struct ref_table tipc_ref_table; + +static DEFINE_RWLOCK(ref_table_lock); +
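The state table above describes a generational handle scheme: the upper bits of a reference hold an instance number that changes every time a slot is recycled, and the lower bits hold the array index, so a stale reference fails the entry->ref == ref check even though it still indexes a valid slot. A compact userspace sketch of the idea (not kernel code; it drops the free list and locking, and all sizes and names are hypothetical):

#include <stddef.h>

#define TBL_SZ 16 /* must be a power of two; index mask is TBL_SZ - 1 */

struct entry {
	void *obj;        /* NULL when the slot is free */
	unsigned int ref; /* instance bits | index bits */
};

static struct entry tbl[TBL_SZ];
static unsigned int next_instance = TBL_SZ; /* instance part lives above the mask */

static unsigned int acquire(void *obj)
{
	unsigned int i;

	for (i = 1; i < TBL_SZ; i++) { /* slot 0 reserved as end-of-list marker */
		if (!tbl[i].obj) {
			tbl[i].obj = obj;
			tbl[i].ref = next_instance + i; /* stamp a fresh instance */
			next_instance += TBL_SZ;
			return tbl[i].ref;
		}
	}
	return 0; /* table full */
}

static void *lookup(unsigned int ref)
{
	struct entry *e = &tbl[ref & (TBL_SZ - 1)];

	/* a recycled slot carries a different instance, so stale refs fail */
	return (e->obj && e->ref == ref) ? e->obj : NULL;
}

static void discard(unsigned int ref)
{
	struct entry *e = &tbl[ref & (TBL_SZ - 1)];

	if (e->obj && e->ref == ref)
		e->obj = NULL;
}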
+/** + * tipc_sk_ref_table_init - create reference table for sockets + */ +int tipc_sk_ref_table_init(u32 req_sz, u32 start) +{ + struct reference *table; + u32 actual_sz; + + /* account for unused entry, then round up size to a power of 2 */ + + req_sz++; + for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1) { + /* do nothing */ + } + + /* allocate table & mark all entries as uninitialized */ + table = vzalloc(actual_sz * sizeof(struct reference)); + if (table == NULL) + return -ENOMEM; + + tipc_ref_table.entries = table; + tipc_ref_table.capacity = req_sz; + tipc_ref_table.init_point = 1; + tipc_ref_table.first_free = 0; + tipc_ref_table.last_free = 0; + tipc_ref_table.index_mask = actual_sz - 1; + tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask; + + return 0; +} + +/** + * tipc_sk_ref_table_stop - destroy reference table for sockets + */ +void tipc_sk_ref_table_stop(void) +{ + if (!tipc_ref_table.entries) + return; + vfree(tipc_ref_table.entries); + tipc_ref_table.entries = NULL; +} + +/* tipc_sk_ref_acquire - create reference to a socket + * + * Register a socket pointer in the reference table. + * Returns a unique reference value that is used from then on to retrieve the + * socket pointer, or to determine if the socket has been deregistered. + */ +u32 tipc_sk_ref_acquire(struct tipc_sock *tsk) +{ + u32 index; + u32 index_mask; + u32 next_plus_upper; + u32 ref = 0; + struct reference *entry; + + if (unlikely(!tsk)) { + pr_err("Attempt to acquire ref. to non-existent obj\n"); + return 0; + } + if (unlikely(!tipc_ref_table.entries)) { + pr_err("Ref. table not found in acquisition attempt\n"); + return 0; + } + + /* Take a free entry, if available; otherwise initialize a new one */ + write_lock_bh(&ref_table_lock); + index = tipc_ref_table.first_free; + entry = &tipc_ref_table.entries[index]; + + if (likely(index)) { + index = tipc_ref_table.first_free; + entry = &tipc_ref_table.entries[index]; + index_mask = tipc_ref_table.index_mask; + next_plus_upper = entry->ref; + tipc_ref_table.first_free = next_plus_upper & index_mask; + ref = (next_plus_upper & ~index_mask) + index; + entry->tsk = tsk; + } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) { + index = tipc_ref_table.init_point++; + entry = &tipc_ref_table.entries[index]; + ref = tipc_ref_table.start_mask + index; + } + + if (ref) { + entry->ref = ref; + entry->tsk = tsk; + } + write_unlock_bh(&ref_table_lock); + return ref; +} + +/* tipc_sk_ref_discard - invalidate reference to a socket + * + * Disallow future references to a socket and free up the entry for re-use. + */ +void tipc_sk_ref_discard(u32 ref) +{ + struct reference *entry; + u32 index; + u32 index_mask; + + if (unlikely(!tipc_ref_table.entries)) { + pr_err("Ref. table not found during discard attempt\n"); + return; + } + + index_mask = tipc_ref_table.index_mask; + index = ref & index_mask; + entry = &tipc_ref_table.entries[index]; + + write_lock_bh(&ref_table_lock); + + if (unlikely(!entry->tsk)) { + pr_err("Attempt to discard ref. 
to non-existent socket\n"); + goto exit; + } + if (unlikely(entry->ref != ref)) { + pr_err("Attempt to discard non-existent reference\n"); + goto exit; + } + + /* Mark entry as unused; increment instance part of entry's + * reference to invalidate any subsequent references + */ + + entry->tsk = NULL; + entry->ref = (ref & ~index_mask) + (index_mask + 1); + + /* Append entry to free entry list */ + if (unlikely(tipc_ref_table.first_free == 0)) + tipc_ref_table.first_free = index; + else + tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index; + tipc_ref_table.last_free = index; +exit: + write_unlock_bh(&ref_table_lock); +} + +/* tipc_sk_get - find referenced socket and return pointer to it + */ +struct tipc_sock *tipc_sk_get(u32 ref) +{ + struct reference *entry; + struct tipc_sock *tsk; + + if (unlikely(!tipc_ref_table.entries)) + return NULL; + read_lock_bh(&ref_table_lock); + entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask]; + tsk = entry->tsk; + if (likely(tsk && (entry->ref == ref))) + sock_hold(&tsk->sk); + else + tsk = NULL; + read_unlock_bh(&ref_table_lock); + return tsk; +} + +/* tipc_sk_get_next - lock & return next socket after referenced one +*/ +struct tipc_sock *tipc_sk_get_next(u32 *ref) +{ + struct reference *entry; + struct tipc_sock *tsk = NULL; + uint index = *ref & tipc_ref_table.index_mask; + + read_lock_bh(&ref_table_lock); + while (++index < tipc_ref_table.capacity) { + entry = &tipc_ref_table.entries[index]; + if (!entry->tsk) + continue; + tsk = entry->tsk; + sock_hold(&tsk->sk); + *ref = entry->ref; + break; + } + read_unlock_bh(&ref_table_lock); + return tsk; +} + +static void tipc_sk_put(struct tipc_sock *tsk) +{ + sock_put(&tsk->sk); +} + /** * tipc_setsockopt - set socket option * @sock: socket structure @@ -1955,7 +2553,6 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; u32 value; int res; @@ -1973,16 +2570,16 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, switch (opt) { case TIPC_IMPORTANCE: - res = tipc_port_set_importance(port, value); + res = tsk_set_importance(tsk, value); break; case TIPC_SRC_DROPPABLE: if (sock->type != SOCK_STREAM) - tipc_port_set_unreliable(port, value); + tsk_set_unreliable(tsk, value); else res = -ENOPROTOOPT; break; case TIPC_DEST_DROPPABLE: - tipc_port_set_unreturnable(port, value); + tsk_set_unreturnable(tsk, value); break; case TIPC_CONN_TIMEOUT: tipc_sk(sk)->conn_timeout = value; @@ -2015,7 +2612,6 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_port *port = &tsk->port; int len; u32 value; int res; @@ -2032,16 +2628,16 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt, switch (opt) { case TIPC_IMPORTANCE: - value = tipc_port_importance(port); + value = tsk_importance(tsk); break; case TIPC_SRC_DROPPABLE: - value = tipc_port_unreliable(port); + value = tsk_unreliable(tsk); break; case TIPC_DEST_DROPPABLE: - value = tipc_port_unreturnable(port); + value = tsk_unreturnable(tsk); break; case TIPC_CONN_TIMEOUT: - value = tipc_sk(sk)->conn_timeout; + value = tsk->conn_timeout; /* no need to set "res", since already 0 at this point */ break; case TIPC_NODE_RECVQ_DEPTH: diff --git a/net/tipc/socket.h b/net/tipc/socket.h index 43b75b3ceced..baa43d03901e 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h @@ -35,56 +35,17 @@ #ifndef _TIPC_SOCK_H 
#define _TIPC_SOCK_H -#include "port.h" #include <net/sock.h> -#define TIPC_CONN_OK 0 -#define TIPC_CONN_PROBING 1 - -/** - * struct tipc_sock - TIPC socket structure - * @sk: socket - interacts with 'port' and with user via the socket API - * @port: port - interacts with 'sk' and with the rest of the TIPC stack - * @peer_name: the peer of the connection, if any - * @conn_timeout: the time we can wait for an unresponded setup request - * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue - * @link_cong: non-zero if owner must sleep because of link congestion - * @sent_unacked: # messages sent by socket, and not yet acked by peer - * @rcv_unacked: # messages read by user, but not yet acked back to peer - */ - -struct tipc_sock { - struct sock sk; - struct tipc_port port; - unsigned int conn_timeout; - atomic_t dupl_rcvcnt; - int link_cong; - uint sent_unacked; - uint rcv_unacked; -}; - -static inline struct tipc_sock *tipc_sk(const struct sock *sk) -{ - return container_of(sk, struct tipc_sock, sk); -} - -static inline struct tipc_sock *tipc_port_to_sock(const struct tipc_port *port) -{ - return container_of(port, struct tipc_sock, port); -} - -static inline void tipc_sock_wakeup(struct tipc_sock *tsk) -{ - tsk->sk.sk_write_space(&tsk->sk); -} - -static inline int tipc_sk_conn_cong(struct tipc_sock *tsk) -{ - return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN; -} - +#define TIPC_CONNACK_INTV 256 +#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) +#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \ + SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) int tipc_sk_rcv(struct sk_buff *buf); - +struct sk_buff *tipc_sk_socks_show(void); void tipc_sk_mcast_rcv(struct sk_buff *buf); +void tipc_sk_reinit(void); +int tipc_sk_ref_table_init(u32 requested_size, u32 start); +void tipc_sk_ref_table_stop(void); #endif diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 642437231ad5..31b5cb232a43 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -36,7 +36,6 @@ #include "core.h" #include "name_table.h" -#include "port.h" #include "subscr.h" /** diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c index f3fef93325a8..1a779b1e8510 100644 --- a/net/tipc/sysctl.c +++ b/net/tipc/sysctl.c @@ -47,6 +47,13 @@ static struct ctl_table tipc_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "named_timeout", + .data = &sysctl_tipc_named_timeout, + .maxlen = sizeof(sysctl_tipc_named_timeout), + .mode = 0644, + .proc_handler = proc_dointvec, + }, {} }; diff --git a/net/wireless/core.c b/net/wireless/core.c index afee5e0455ea..c6620aa679e0 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -492,12 +492,6 @@ int wiphy_register(struct wiphy *wiphy) int i; u16 ifmodes = wiphy->interface_modes; - /* - * There are major locking problems in nl80211/mac80211 for CSA, - * disable for all drivers until this has been reworked. 
- */ - wiphy->flags &= ~WIPHY_FLAG_HAS_CHANNEL_SWITCH; - #ifdef CONFIG_PM if (WARN_ON(wiphy->wowlan && (wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) && @@ -635,6 +629,9 @@ int wiphy_register(struct wiphy *wiphy) if (IS_ERR(rdev->wiphy.debugfsdir)) rdev->wiphy.debugfsdir = NULL; + cfg80211_debugfs_rdev_add(rdev); + nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); + if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { struct regulatory_request request; @@ -646,8 +643,6 @@ int wiphy_register(struct wiphy *wiphy) nl80211_send_reg_change_event(&request); } - cfg80211_debugfs_rdev_add(rdev); - rdev->wiphy.registered = true; rtnl_unlock(); @@ -659,8 +654,6 @@ int wiphy_register(struct wiphy *wiphy) return res; } - nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); - return 0; } EXPORT_SYMBOL(wiphy_register); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 266766b8d80b..369fc334fdad 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -605,7 +605,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, } bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, - const u8 *buf, size_t len, u32 flags, gfp_t gfp) + const u8 *buf, size_t len, u32 flags) { struct wiphy *wiphy = wdev->wiphy; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); @@ -648,7 +648,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, /* Indicate the received Action frame to user space */ if (nl80211_send_mgmt(rdev, wdev, reg->nlportid, freq, sig_mbm, - buf, len, flags, gfp)) + buf, len, flags, GFP_ATOMIC)) continue; result = true; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7257164af91b..233c54e45092 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6033,7 +6033,6 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, const struct cfg80211_bss_ies *ies; void *hdr; struct nlattr *bss; - bool tsf = false; ASSERT_WDEV_LOCK(wdev); @@ -6060,18 +6059,27 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, goto nla_put_failure; rcu_read_lock(); + /* indicate whether we have probe response data or not */ + if (rcu_access_pointer(res->proberesp_ies) && + nla_put_flag(msg, NL80211_BSS_PRESP_DATA)) + goto fail_unlock_rcu; + + /* this pointer preferentially points to probe response data + * but is always valid + */ ies = rcu_dereference(res->ies); if (ies) { if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf)) goto fail_unlock_rcu; - tsf = true; if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS, ies->len, ies->data)) goto fail_unlock_rcu; } + + /* and this pointer is always beacon data (unless the driver did not know) */ ies = rcu_dereference(res->beacon_ies); - if (ies) { - if (!tsf && nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf)) + if (ies && ies->from_beacon) { + if (nla_put_u64(msg, NL80211_BSS_BEACON_TSF, ies->tsf)) goto fail_unlock_rcu; if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES, ies->len, ies->data)) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 0798c62e6085..620a4b40d466 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -884,6 +884,7 @@ struct cfg80211_bss* cfg80211_inform_bss_width(struct wiphy *wiphy, struct ieee80211_channel *rx_channel, enum nl80211_bss_scan_width scan_width, + enum cfg80211_bss_frame_type ftype, const u8 *bssid, u64 tsf, u16 capability, u16 beacon_interval, const u8 *ie, size_t ielen, s32 signal, gfp_t gfp)
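The new ftype argument to cfg80211_inform_bss_width() lets a driver say whether the reported IEs came from a beacon, a probe response, or an unknown frame, which is what the from_beacon flag and the beacon_ies/proberesp_ies split in the following hunk record. A sketch of that classification (not kernel code; all names are hypothetical):

enum frame_type { FTYPE_UNKNOWN, FTYPE_BEACON, FTYPE_PRESP };

struct bss_ies {
	int from_beacon; /* set only when the frame is known to be a beacon */
	const unsigned char *data;
	unsigned long len;
};

struct bss {
	struct bss_ies *beacon_ies;    /* beacon (or unknown-origin) IEs */
	struct bss_ies *proberesp_ies; /* probe response IEs */
};

static void store_ies(struct bss *bss, struct bss_ies *ies, enum frame_type ftype)
{
	switch (ftype) {
	case FTYPE_BEACON:
		ies->from_beacon = 1;
		/* fall through: beacon IEs land in the beacon slot */
	case FTYPE_UNKNOWN:
		bss->beacon_ies = ies; /* unknown origin defaults to the beacon slot */
		break;
	case FTYPE_PRESP:
		bss->proberesp_ies = ies;
		break;
	}
}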
@@ -911,21 +912,32 @@ cfg80211_inform_bss_width(struct wiphy *wiphy, tmp.pub.beacon_interval = beacon_interval; tmp.pub.capability = capability; /* - * Since we do not know here whether the IEs are from a Beacon or Probe + * If we do not know here whether the IEs are from a Beacon or Probe * Response frame, we need to pick one of the options and only use it * with the driver that does not provide the full Beacon/Probe Response * frame. Use Beacon frame pointer to avoid indicating that this should * override the IEs pointer should we have received an earlier * indication of Probe Response data. */ - ies = kmalloc(sizeof(*ies) + ielen, gfp); + ies = kzalloc(sizeof(*ies) + ielen, gfp); if (!ies) return NULL; ies->len = ielen; ies->tsf = tsf; + ies->from_beacon = false; memcpy(ies->data, ie, ielen); - rcu_assign_pointer(tmp.pub.beacon_ies, ies); + switch (ftype) { + case CFG80211_BSS_FTYPE_BEACON: + ies->from_beacon = true; + /* fall through to assign */ + case CFG80211_BSS_FTYPE_UNKNOWN: + rcu_assign_pointer(tmp.pub.beacon_ies, ies); + break; + case CFG80211_BSS_FTYPE_PRESP: + rcu_assign_pointer(tmp.pub.proberesp_ies, ies); + break; + } rcu_assign_pointer(tmp.pub.ies, ies); signal_valid = abs(rx_channel->center_freq - channel->center_freq) <= @@ -982,11 +994,12 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy, if (!channel) return NULL; - ies = kmalloc(sizeof(*ies) + ielen, gfp); + ies = kzalloc(sizeof(*ies) + ielen, gfp); if (!ies) return NULL; ies->len = ielen; ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); + ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control); memcpy(ies->data, mgmt->u.probe_resp.variable, ielen); if (ieee80211_is_probe_resp(mgmt->frame_control)) diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index c51e8f7b8653..499d6c18a8ce 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -166,11 +166,7 @@ static int xfrm_output_gso(struct sk_buff *skb) err = xfrm_output2(segs); if (unlikely(err)) { - while ((segs = nskb)) { - nskb = segs->next; - segs->next = NULL; - kfree_skb(segs); - } + kfree_skb_list(nskb); return err; }
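The final xfrm_output_gso() hunk replaces an open-coded walk that detached and freed each segment with a single kfree_skb_list() call. The same refactor on an ordinary NULL-terminated singly linked list, as a sketch (not kernel code; the types and helpers are hypothetical):

#include <stdlib.h>

struct node {
	struct node *next;
	/* payload elided */
};

/* Before: detach and free each element by hand. */
static void free_list_open_coded(struct node *head)
{
	struct node *n;

	while ((n = head)) {
		head = n->next;
		n->next = NULL;
		free(n);
	}
}

/* After: let one helper own the walk, as kfree_skb_list() does for skbs. */
static void free_list(struct node *head)
{
	struct node *next;

	for (; head; head = next) {
		next = head->next;
		free(head);
	}
}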