589 files changed, 22078 insertions, 4360 deletions
@@ -665,6 +665,7 @@ Tomeu Vizoso <tomeu@tomeuvizoso.net> <tomeu.vizoso@collabora.com> Thomas Graf <tgraf@suug.ch> Thomas Körper <socketcan@esd.eu> <thomas.koerper@esd.eu> Thomas Pedersen <twp@codeaurora.org> +Thorsten Blum <thorsten.blum@linux.dev> <thorsten.blum@toblux.com> Tiezhu Yang <yangtiezhu@loongson.cn> <kernelpatch@126.com> Tingwei Zhang <quic_tingwei@quicinc.com> <tingwei@codeaurora.org> Tirupathi Reddy <quic_tirupath@quicinc.com> <tirupath@codeaurora.org> diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 69af2173555f..6d02168d78be 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1599,6 +1599,15 @@ The following nested keys are defined. pglazyfreed (npn) Amount of reclaimed lazyfree pages + swpin_zero + Number of pages swapped into memory and filled with zero, where I/O + was optimized out because the page content was detected to be zero + during swapout. + + swpout_zero + Number of zero-filled pages swapped out with I/O skipped due to the + content being detected as zero. + zswpin Number of pages moved in to memory from zswap. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 1518343bbe22..ea717cdaf581 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1546,6 +1546,7 @@ failslab= fail_usercopy= fail_page_alloc= + fail_skb_realloc= fail_make_request=[KNL] General fault injection mechanism. Format: <interval>,<probability>,<space>,<times> @@ -6688,7 +6689,7 @@ 0: no polling (default) thp_anon= [KNL] - Format: <size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state> + Format: <size>[KMG],<size>[KMG]:<state>;<size>[KMG]-<size>[KMG]:<state> state is one of "always", "madvise", "never" or "inherit". Control the default behavior of the system with respect to anonymous transparent hugepages. @@ -6727,6 +6728,15 @@ torture.verbose_sleep_duration= [KNL] Duration of each verbose-printk() sleep in jiffies. + tpm.disable_pcr_integrity= [HW,TPM] + Do not protect PCR registers from unintended physical + access, or interposers in the bus by the means of + having an integrity protected session wrapped around + TPM2_PCR_Extend command. Consider this in a situation + where TPM is heavily utilized by IMA, thus protection + causing a major performance hit, and the space where + machines are deployed is by other means guarded. + tpm_suspend_pcr=[HW,TPM] Format: integer pcr id Specify that at suspend time, the tpm driver diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index cfdd16a52e39..a1bb495eab59 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -303,7 +303,7 @@ control by passing the parameter ``transparent_hugepage=always`` or kernel command line. Alternatively, each supported anonymous THP size can be controlled by -passing ``thp_anon=<size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state>``, +passing ``thp_anon=<size>[KMG],<size>[KMG]:<state>;<size>[KMG]-<size>[KMG]:<state>``, where ``<size>`` is the THP size (must be a power of 2 of PAGE_SIZE and supported anonymous THP) and ``<state>`` is one of ``always``, ``madvise``, ``never`` or ``inherit``. 
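For reference, a boot-time setting that matches the corrected ``thp_anon=`` format documented in the two hunks above would look like the line below. The sizes are only illustrative; each must be a power of 2 of PAGE_SIZE and an anonymous THP size the running kernel actually supports::

    thp_anon=16K,64K:always;128K-512K:inherit;1M-2M:never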
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml index 121a4bbd147b..4722847dc862 100644 --- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml +++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml @@ -34,6 +34,7 @@ properties: - microchip,ksz9563 - microchip,ksz8563 - microchip,ksz8567 + - microchip,lan9646 reset-gpios: description: diff --git a/Documentation/devicetree/bindings/net/mdio-mux-gpio.yaml b/Documentation/devicetree/bindings/net/mdio-mux-gpio.yaml index 71c25c4580ea..cc674b21588c 100644 --- a/Documentation/devicetree/bindings/net/mdio-mux-gpio.yaml +++ b/Documentation/devicetree/bindings/net/mdio-mux-gpio.yaml @@ -53,37 +53,21 @@ examples: ethernet-phy@1 { reg = <1>; - marvell,reg-init = <3 0x10 0 0x5777>, - <3 0x11 0 0x00aa>, - <3 0x12 0 0x4105>, - <3 0x13 0 0x0a60>; interrupt-parent = <&gpio>; interrupts = <10 8>; /* Pin 10, active low */ }; ethernet-phy@2 { reg = <2>; - marvell,reg-init = <3 0x10 0 0x5777>, - <3 0x11 0 0x00aa>, - <3 0x12 0 0x4105>, - <3 0x13 0 0x0a60>; interrupt-parent = <&gpio>; interrupts = <10 8>; /* Pin 10, active low */ }; ethernet-phy@3 { reg = <3>; - marvell,reg-init = <3 0x10 0 0x5777>, - <3 0x11 0 0x00aa>, - <3 0x12 0 0x4105>, - <3 0x13 0 0x0a60>; interrupt-parent = <&gpio>; interrupts = <10 8>; /* Pin 10, active low */ }; ethernet-phy@4 { reg = <4>; - marvell,reg-init = <3 0x10 0 0x5777>, - <3 0x11 0 0x00aa>, - <3 0x12 0 0x4105>, - <3 0x13 0 0x0a60>; interrupt-parent = <&gpio>; interrupts = <10 8>; /* Pin 10, active low */ }; @@ -96,37 +80,21 @@ examples: ethernet-phy@1 { reg = <1>; - marvell,reg-init = <3 0x10 0 0x5777>, - <3 0x11 0 0x00aa>, - <3 0x12 0 0x4105>, - <3 0x13 0 0x0a60>; interrupt-parent = <&gpio>; interrupts = <12 8>; /* Pin 12, active low */ }; ethernet-phy@2 { reg = <2>; - marvell,reg-init = <3 0x10 0 0x5777>, - <3 0x11 0 0x00aa>, - <3 0x12 0 0x4105>, - <3 0x13 0 0x0a60>; interrupt-parent = <&gpio>; interrupts = <12 8>; /* Pin 12, active low */ }; ethernet-phy@3 { reg = <3>; - marvell,reg-init = <3 0x10 0 0x5777>, - <3 0x11 0 0x00aa>, - <3 0x12 0 0x4105>, - <3 0x13 0 0x0a60>; interrupt-parent = <&gpio>; interrupts = <12 8>; /* Pin 12, active low */ }; ethernet-phy@4 { reg = <4>; - marvell,reg-init = <3 0x10 0 0x5777>, - <3 0x11 0 0x00aa>, - <3 0x12 0 0x4105>, - <3 0x13 0 0x0a60>; interrupt-parent = <&gpio>; interrupts = <12 8>; /* Pin 12, active low */ }; diff --git a/Documentation/devicetree/bindings/net/sff,sfp.yaml b/Documentation/devicetree/bindings/net/sff,sfp.yaml index 90611b598d2b..15616ad737f5 100644 --- a/Documentation/devicetree/bindings/net/sff,sfp.yaml +++ b/Documentation/devicetree/bindings/net/sff,sfp.yaml @@ -132,7 +132,7 @@ examples: pinctrl-names = "default"; pinctrl-0 = <&cpm_phy0_pins &cps_phy0_pins>; reg = <0>; - interrupt = <&cpm_gpio2 18 IRQ_TYPE_EDGE_FALLING>; + interrupts = <18 IRQ_TYPE_EDGE_FALLING>; sfp = <&sfp2>; }; }; diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml index 8675d7d0215c..a71fdf05bc1e 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml @@ -50,6 +50,9 @@ properties: vddrfa1p7-supply: description: VDD_RFA_1P7 supply regulator handle + vddrfa1p8-supply: + description: VDD_RFA_1P8 supply regulator handle + vddpcie0p9-supply: description: VDD_PCIE_0P9 supply regulator handle @@ 
-77,6 +80,22 @@ allOf: - vddrfa1p7-supply - vddpcie0p9-supply - vddpcie1p8-supply + - if: + properties: + compatible: + contains: + const: pci17cb,1103 + then: + required: + - vddrfacmn-supply + - vddaon-supply + - vddwlcx-supply + - vddwlmx-supply + - vddrfa0p8-supply + - vddrfa1p2-supply + - vddrfa1p8-supply + - vddpcie0p9-supply + - vddpcie1p8-supply additionalProperties: false @@ -99,6 +118,16 @@ examples: compatible = "pci17cb,1103"; reg = <0x10000 0x0 0x0 0x0 0x0>; + vddrfacmn-supply = <&vreg_pmu_rfa_cmn_0p8>; + vddaon-supply = <&vreg_pmu_aon_0p8>; + vddwlcx-supply = <&vreg_pmu_wlcx_0p8>; + vddwlmx-supply = <&vreg_pmu_wlmx_0p8>; + vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>; + vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>; + vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>; + vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>; + vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>; + qcom,ath11k-calibration-variant = "LE_X13S"; }; }; diff --git a/Documentation/fault-injection/fault-injection.rst b/Documentation/fault-injection/fault-injection.rst index 8b8aeea71c68..1c14ba08fbfc 100644 --- a/Documentation/fault-injection/fault-injection.rst +++ b/Documentation/fault-injection/fault-injection.rst @@ -45,6 +45,32 @@ Available fault injection capabilities ALLOW_ERROR_INJECTION() macro, by setting debugfs entries under /sys/kernel/debug/fail_function. No boot option supported. +- fail_skb_realloc + + inject skb (socket buffer) reallocation events into the network path. The + primary goal is to identify and prevent issues related to pointer + mismanagement in the network subsystem. By forcing skb reallocation at + strategic points, this feature creates scenarios where existing pointers to + skb headers become invalid. + + When the fault is injected and the reallocation is triggered, cached pointers + to skb headers and data no longer reference valid memory locations. This + deliberate invalidation helps expose code paths where proper pointer updating + is neglected after a reallocation event. + + By creating these controlled fault scenarios, the system can catch instances + where stale pointers are used, potentially leading to memory corruption or + system instability. + + To select the interface to act on, write the network name to + /sys/kernel/debug/fail_skb_realloc/devname. + If this field is left empty (which is the default value), skb reallocation + will be forced on all network interfaces. + + The effectiveness of this fault detection is enhanced when KASAN is + enabled, as it helps identify invalid memory references and use-after-free + (UAF) issues. + - NVMe fault injection inject NVMe status code and retry flag on devices permitted by setting @@ -216,6 +242,19 @@ configuration of fault-injection capabilities. use a negative errno, you better use 'printf' instead of 'echo', e.g.: $ printf %#x -12 > retval +- /sys/kernel/debug/fail_skb_realloc/devname: + + Specifies the network interface on which to force SKB reallocation. If + left empty, SKB reallocation will be applied to all network interfaces. 
+ + Example usage:: + + # Force skb reallocation on eth0 + echo "eth0" > /sys/kernel/debug/fail_skb_realloc/devname + + # Clear the selection and force skb reallocation on all interfaces + echo "" > /sys/kernel/debug/fail_skb_realloc/devname + Boot option ^^^^^^^^^^^ @@ -227,6 +266,7 @@ use the boot option:: fail_usercopy= fail_make_request= fail_futex= + fail_skb_realloc= mmc_core.fail_request=<interval>,<probability>,<space>,<times> proc entries diff --git a/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst b/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst index 1e196cb9ce25..af7db0e91f6b 100644 --- a/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst +++ b/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst @@ -14,6 +14,7 @@ Contents - `Basic packet flow`_ - `Devlink health reporters`_ - `Quality of service`_ +- `RVU representors`_ Overview ======== @@ -340,3 +341,93 @@ Setup HTB offload # tc class add dev <interface> parent 1: classid 1:2 htb rate 10Gbit prio 2 quantum 188416 # tc class add dev <interface> parent 1: classid 1:3 htb rate 10Gbit prio 2 quantum 32768 + + +RVU Representors +================ + +RVU representor driver adds support for creation of representor devices for +RVU PFs' VFs in the system. Representor devices are created when user enables +the switchdev mode. +Switchdev mode can be enabled either before or after setting up SRIOV numVFs. +All representor devices share a single NIXLF but each has a dedicated Rx/Tx +queues. RVU PF representor driver registers a separate netdev for each +Rx/Tx queue pair. + +Current HW does not support built-in switch which can do L2 learning and +forwarding packets between representee and representor. Hence, packet path +between representee and it's representor is achieved by setting up appropriate +NPC MCAM filters. +Transmit packets matching these filters will be loopbacked through hardware +loopback channel/interface (i.e, instead of sending them out of MAC interface). +Which will again match the installed filters and will be forwarded. +This way representee => representor and representor => representee packet +path is achieved. These rules get installed when representors are created +and gets active/deactivate based on the representor/representee interface state. + +Usage example: + + - Change device to switchdev mode:: + + # devlink dev eswitch set pci/0002:1c:00.0 mode switchdev + + - List of representor devices on the system:: + + # ip link show + Rpf1vf0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state DOWN mode DEFAULT group default qlen 1000 link/ether f6:43:83:ee:26:21 brd ff:ff:ff:ff:ff:ff + Rpf1vf1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state DOWN mode DEFAULT group default qlen 1000 link/ether 12:b2:54:0e:24:54 brd ff:ff:ff:ff:ff:ff + Rpf1vf2: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state DOWN mode DEFAULT group default qlen 1000 link/ether 4a:12:c4:4c:32:62 brd ff:ff:ff:ff:ff:ff + Rpf1vf3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state DOWN mode DEFAULT group default qlen 1000 link/ether ca:cb:68:0e:e2:6e brd ff:ff:ff:ff:ff:ff + Rpf2vf0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state DOWN mode DEFAULT group default qlen 1000 link/ether 06:cc:ad:b4:f0:93 brd ff:ff:ff:ff:ff:ff + + +To delete the representors devices from the system. Change the device to legacy mode. 
+ + - Change device to legacy mode:: + + # devlink dev eswitch set pci/0002:1c:00.0 mode legacy + +RVU representors can be managed using devlink ports +(see :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>`) interface. + + - Show devlink ports of representors:: + + # devlink port + pci/0002:1c:00.0/0: type eth netdev Rpf1vf0 flavour physical port 0 splittable false + pci/0002:1c:00.0/1: type eth netdev Rpf1vf1 flavour pcivf controller 0 pfnum 1 vfnum 1 external false splittable false + pci/0002:1c:00.0/2: type eth netdev Rpf1vf2 flavour pcivf controller 0 pfnum 1 vfnum 2 external false splittable false + pci/0002:1c:00.0/3: type eth netdev Rpf1vf3 flavour pcivf controller 0 pfnum 1 vfnum 3 external false splittable false + +Function attributes +=================== + +The RVU representor support function attributes for representors. +Port function configuration of the representors are supported through devlink eswitch port. + +MAC address setup +----------------- + +RVU representor driver support devlink port function attr mechanism to setup MAC +address. (refer to Documentation/networking/devlink/devlink-port.rst) + + - To setup MAC address for port 2:: + + # devlink port function set pci/0002:1c:00.0/2 hw_addr 5c:a1:1b:5e:43:11 + # devlink port show pci/0002:1c:00.0/2 + pci/0002:1c:00.0/2: type eth netdev Rpf1vf2 flavour pcivf controller 0 pfnum 1 vfnum 2 external false splittable false + function: + hw_addr 5c:a1:1b:5e:43:11 + + +TC offload +========== + +The rvu representor driver implements support for offloading tc rules using port representors. + + - Drop packets with vlan id 3:: + + # tc filter add dev Rpf1vf0 protocol 802.1Q parent ffff: flower vlan_id 3 vlan_ethtype ipv4 skip_sw action drop + + - Redirect packets with vlan id 5 and IPv4 packets to eth1, after stripping vlan header.:: + + # tc filter add dev Rpf1vf0 ingress protocol 802.1Q flower vlan_id 5 vlan_ethtype ipv4 skip_sw action vlan pop action mirred ingress redirect dev eth1 diff --git a/Documentation/networking/devmem.rst b/Documentation/networking/devmem.rst index a55bf21f671c..d95363645331 100644 --- a/Documentation/networking/devmem.rst +++ b/Documentation/networking/devmem.rst @@ -225,6 +225,15 @@ The user must ensure the tokens are returned to the kernel in a timely manner. Failure to do so will exhaust the limited dmabuf that is bound to the RX queue and will lead to packet drops. +The user must pass no more than 128 tokens, with no more than 1024 total frags +among the token->token_count across all the tokens. If the user provides more +than 1024 frags, the kernel will free up to 1024 frags and return early. + +The kernel returns the number of actual frags freed. The number of frags freed +can be less than the tokens provided by the user in case of: + +(a) an internal kernel leak bug. +(b) the user passed more than 1024 frags. Implementation & Caveats ======================== diff --git a/Documentation/security/landlock.rst b/Documentation/security/landlock.rst index 36f26501fd15..59ecdb1c0d4d 100644 --- a/Documentation/security/landlock.rst +++ b/Documentation/security/landlock.rst @@ -11,18 +11,18 @@ Landlock LSM: kernel documentation Landlock's goal is to create scoped access-control (i.e. sandboxing). To harden a whole system, this feature should be available to any process, -including unprivileged ones. Because such process may be compromised or +including unprivileged ones. Because such a process may be compromised or backdoored (i.e. 
untrusted), Landlock's features must be safe to use from the kernel and other processes point of view. Landlock's interface must therefore expose a minimal attack surface. Landlock is designed to be usable by unprivileged processes while following the system security policy enforced by other access control mechanisms (e.g. DAC, -LSM). Indeed, a Landlock rule shall not interfere with other access-controls -enforced on the system, only add more restrictions. +LSM). A Landlock rule shall not interfere with other access-controls enforced +on the system, only add more restrictions. Any user can enforce Landlock rulesets on their processes. They are merged and -evaluated according to the inherited ones in a way that ensures that only more +evaluated against inherited rulesets in a way that ensures that only more constraints can be added. User space documentation can be found here: @@ -43,7 +43,7 @@ Guiding principles for safe access controls only impact the processes requesting them. * Resources (e.g. file descriptors) directly obtained from the kernel by a sandboxed process shall retain their scoped accesses (at the time of resource - acquisition) whatever process use them. + acquisition) whatever process uses them. Cf. `File descriptor access rights`_. Design choices @@ -71,7 +71,7 @@ the same results, when they are executed under the same Landlock domain. Taking the ``LANDLOCK_ACCESS_FS_TRUNCATE`` right as an example, it may be allowed to open a file for writing without being allowed to :manpage:`ftruncate` the resulting file descriptor if the related file -hierarchy doesn't grant such access right. The following sequences of +hierarchy doesn't grant that access right. The following sequences of operations have the same semantic and should then have the same result: * ``truncate(path);`` @@ -81,7 +81,7 @@ Similarly to file access modes (e.g. ``O_RDWR``), Landlock access rights attached to file descriptors are retained even if they are passed between processes (e.g. through a Unix domain socket). Such access rights will then be enforced even if the receiving process is not sandboxed by Landlock. Indeed, -this is required to keep a consistent access control over the whole system, and +this is required to keep access controls consistent over the whole system, and this avoids unattended bypasses through file descriptor passing (i.e. confused deputy attack). diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst index c8d3e46badc5..d639c61cb472 100644 --- a/Documentation/userspace-api/landlock.rst +++ b/Documentation/userspace-api/landlock.rst @@ -8,13 +8,13 @@ Landlock: unprivileged access control ===================================== :Author: Mickaël Salaün -:Date: September 2024 +:Date: October 2024 -The goal of Landlock is to enable to restrict ambient rights (e.g. global +The goal of Landlock is to enable restriction of ambient rights (e.g. global filesystem or network access) for a set of processes. Because Landlock -is a stackable LSM, it makes possible to create safe security sandboxes as new -security layers in addition to the existing system-wide access-controls. This -kind of sandbox is expected to help mitigate the security impact of bugs or +is a stackable LSM, it makes it possible to create safe security sandboxes as +new security layers in addition to the existing system-wide access-controls. +This kind of sandbox is expected to help mitigate the security impact of bugs or unexpected/malicious behaviors in user space applications. 
Landlock empowers any process, including unprivileged ones, to securely restrict themselves. @@ -86,8 +86,8 @@ to be explicit about the denied-by-default access rights. LANDLOCK_SCOPE_SIGNAL, }; -Because we may not know on which kernel version an application will be -executed, it is safer to follow a best-effort security approach. Indeed, we +Because we may not know which kernel version an application will be executed +on, it is safer to follow a best-effort security approach. Indeed, we should try to protect users as much as possible whatever the kernel they are using. @@ -129,7 +129,7 @@ version, and only use the available subset of access rights: LANDLOCK_SCOPE_SIGNAL); } -This enables to create an inclusive ruleset that will contain our rules. +This enables the creation of an inclusive ruleset that will contain our rules. .. code-block:: c @@ -219,42 +219,41 @@ If the ``landlock_restrict_self`` system call succeeds, the current thread is now restricted and this policy will be enforced on all its subsequently created children as well. Once a thread is landlocked, there is no way to remove its security policy; only adding more restrictions is allowed. These threads are -now in a new Landlock domain, merge of their parent one (if any) with the new -ruleset. +now in a new Landlock domain, which is a merger of their parent one (if any) +with the new ruleset. Full working code can be found in `samples/landlock/sandboxer.c`_. Good practices -------------- -It is recommended setting access rights to file hierarchy leaves as much as +It is recommended to set access rights to file hierarchy leaves as much as possible. For instance, it is better to be able to have ``~/doc/`` as a read-only hierarchy and ``~/tmp/`` as a read-write hierarchy, compared to ``~/`` as a read-only hierarchy and ``~/tmp/`` as a read-write hierarchy. Following this good practice leads to self-sufficient hierarchies that do not depend on their location (i.e. parent directories). This is particularly relevant when we want to allow linking or renaming. Indeed, having consistent -access rights per directory enables to change the location of such directory +access rights per directory enables changing the location of such directories without relying on the destination directory access rights (except those that are required for this operation, see ``LANDLOCK_ACCESS_FS_REFER`` documentation). Having self-sufficient hierarchies also helps to tighten the required access rights to the minimal set of data. This also helps avoid sinkhole directories, -i.e. directories where data can be linked to but not linked from. However, +i.e. directories where data can be linked to but not linked from. However, this depends on data organization, which might not be controlled by developers. In this case, granting read-write access to ``~/tmp/``, instead of write-only -access, would potentially allow to move ``~/tmp/`` to a non-readable directory +access, would potentially allow moving ``~/tmp/`` to a non-readable directory and still keep the ability to list the content of ``~/tmp/``. Layers of file path access rights --------------------------------- Each time a thread enforces a ruleset on itself, it updates its Landlock domain -with a new layer of policy. Indeed, this complementary policy is stacked with -the potentially other rulesets already restricting this thread. A sandboxed -thread can then safely add more constraints to itself with a new enforced -ruleset. +with a new layer of policy. 
This complementary policy is stacked with any +other rulesets potentially already restricting this thread. A sandboxed thread +can then safely add more constraints to itself with a new enforced ruleset. One policy layer grants access to a file path if at least one of its rules encountered on the path grants the access. A sandboxed thread can only access @@ -265,7 +264,7 @@ etc.). Bind mounts and OverlayFS ------------------------- -Landlock enables to restrict access to file hierarchies, which means that these +Landlock enables restricting access to file hierarchies, which means that these access rights can be propagated with bind mounts (cf. Documentation/filesystems/sharedsubtree.rst) but not with Documentation/filesystems/overlayfs.rst. @@ -278,21 +277,21 @@ access to multiple file hierarchies at the same time, whether these hierarchies are the result of bind mounts or not. An OverlayFS mount point consists of upper and lower layers. These layers are -combined in a merge directory, result of the mount point. This merge hierarchy -may include files from the upper and lower layers, but modifications performed -on the merge hierarchy only reflects on the upper layer. From a Landlock -policy point of view, each OverlayFS layers and merge hierarchies are -standalone and contains their own set of files and directories, which is -different from bind mounts. A policy restricting an OverlayFS layer will not -restrict the resulted merged hierarchy, and vice versa. Landlock users should -then only think about file hierarchies they want to allow access to, regardless -of the underlying filesystem. +combined in a merge directory, and that merged directory becomes available at +the mount point. This merge hierarchy may include files from the upper and +lower layers, but modifications performed on the merge hierarchy only reflect +on the upper layer. From a Landlock policy point of view, all OverlayFS layers +and merge hierarchies are standalone and each contains their own set of files +and directories, which is different from bind mounts. A policy restricting an +OverlayFS layer will not restrict the resulted merged hierarchy, and vice versa. +Landlock users should then only think about file hierarchies they want to allow +access to, regardless of the underlying filesystem. Inheritance ----------- Every new thread resulting from a :manpage:`clone(2)` inherits Landlock domain -restrictions from its parent. This is similar to the seccomp inheritance (cf. +restrictions from its parent. This is similar to seccomp inheritance (cf. Documentation/userspace-api/seccomp_filter.rst) or any other LSM dealing with task's :manpage:`credentials(7)`. For instance, one process's thread may apply Landlock rules to itself, but they will not be automatically applied to other @@ -311,8 +310,8 @@ Ptrace restrictions A sandboxed process has less privileges than a non-sandboxed process and must then be subject to additional restrictions when manipulating another process. To be allowed to use :manpage:`ptrace(2)` and related syscalls on a target -process, a sandboxed process should have a subset of the target process rules, -which means the tracee must be in a sub-domain of the tracer. +process, a sandboxed process should have a superset of the target process's +access rights, which means the tracee must be in a sub-domain of the tracer. IPC scoping ----------- @@ -322,7 +321,7 @@ interactions between sandboxes. Each Landlock domain can be explicitly scoped for a set of actions by specifying it on a ruleset. 
For example, if a sandboxed process should not be able to :manpage:`connect(2)` to a non-sandboxed process through abstract :manpage:`unix(7)` sockets, we can -specify such restriction with ``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET``. +specify such a restriction with ``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET``. Moreover, if a sandboxed process should not be able to send a signal to a non-sandboxed process, we can specify this restriction with ``LANDLOCK_SCOPE_SIGNAL``. @@ -394,7 +393,7 @@ Backward and forward compatibility Landlock is designed to be compatible with past and future versions of the kernel. This is achieved thanks to the system call attributes and the associated bitflags, particularly the ruleset's ``handled_access_fs``. Making -handled access right explicit enables the kernel and user space to have a clear +handled access rights explicit enables the kernel and user space to have a clear contract with each other. This is required to make sure sandboxing will not get stricter with a system update, which could break applications. @@ -563,33 +562,34 @@ always allowed when using a kernel that only supports the first or second ABI. Starting with the Landlock ABI version 3, it is now possible to securely control truncation thanks to the new ``LANDLOCK_ACCESS_FS_TRUNCATE`` access right. -Network support (ABI < 4) -------------------------- +TCP bind and connect (ABI < 4) +------------------------------ Starting with the Landlock ABI version 4, it is now possible to restrict TCP bind and connect actions to only a set of allowed ports thanks to the new ``LANDLOCK_ACCESS_NET_BIND_TCP`` and ``LANDLOCK_ACCESS_NET_CONNECT_TCP`` access rights. -IOCTL (ABI < 5) ---------------- +Device IOCTL (ABI < 5) +---------------------- IOCTL operations could not be denied before the fifth Landlock ABI, so :manpage:`ioctl(2)` is always allowed when using a kernel that only supports an earlier ABI. Starting with the Landlock ABI version 5, it is possible to restrict the use of -:manpage:`ioctl(2)` using the new ``LANDLOCK_ACCESS_FS_IOCTL_DEV`` right. +:manpage:`ioctl(2)` on character and block devices using the new +``LANDLOCK_ACCESS_FS_IOCTL_DEV`` right. -Abstract UNIX socket scoping (ABI < 6) --------------------------------------- +Abstract UNIX socket (ABI < 6) +------------------------------ Starting with the Landlock ABI version 6, it is possible to restrict connections to an abstract :manpage:`unix(7)` socket by setting ``LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET`` to the ``scoped`` ruleset attribute. -Signal scoping (ABI < 6) ------------------------- +Signal (ABI < 6) +---------------- Starting with the Landlock ABI version 6, it is possible to restrict :manpage:`signal(7)` sending by setting ``LANDLOCK_SCOPE_SIGNAL`` to the @@ -605,9 +605,9 @@ Build time configuration Landlock was first introduced in Linux 5.13 but it must be configured at build time with ``CONFIG_SECURITY_LANDLOCK=y``. Landlock must also be enabled at boot -time as the other security modules. The list of security modules enabled by +time like other security modules. The list of security modules enabled by default is set with ``CONFIG_LSM``. The kernel configuration should then -contains ``CONFIG_LSM=landlock,[...]`` with ``[...]`` as the list of other +contain ``CONFIG_LSM=landlock,[...]`` with ``[...]`` as the list of other potentially useful security modules for the running system (see the ``CONFIG_LSM`` help). @@ -669,7 +669,7 @@ Questions and answers What about user space sandbox managers? 
--------------------------------------- -Using user space process to enforce restrictions on kernel resources can lead +Using user space processes to enforce restrictions on kernel resources can lead to race conditions or inconsistent evaluations (i.e. `Incorrect mirroring of the OS code and state <https://www.ndss-symposium.org/ndss2003/traps-and-pitfalls-practical-problems-system-call-interposition-based-security-tools/>`_). diff --git a/MAINTAINERS b/MAINTAINERS index a4855581d62c..96b9344c3524 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1174,8 +1174,9 @@ F: Documentation/hid/amd-sfh* F: drivers/hid/amd-sfh-hid/ AMD SPI DRIVER -M: Sanjay R Mehta <sanju.mehta@amd.com> -S: Maintained +M: Raju Rangoju <Raju.Rangoju@amd.com> +L: linux-spi@vger.kernel.org +S: Supported F: drivers/spi/spi-amd.c AMD XGBE DRIVER @@ -14439,8 +14440,10 @@ M: Qingfang Deng <dqfext@gmail.com> M: SkyLake Huang <SkyLake.Huang@mediatek.com> L: netdev@vger.kernel.org S: Maintained -F: drivers/net/phy/mediatek-ge-soc.c -F: drivers/net/phy/mediatek-ge.c +F: drivers/net/phy/mediatek/mtk-ge-soc.c +F: drivers/net/phy/mediatek/mtk-phy-lib.c +F: drivers/net/phy/mediatek/mtk-ge.c +F: drivers/net/phy/mediatek/mtk.h F: drivers/phy/mediatek/phy-mtk-xfi-tphy.c MEDIATEK I2C CONTROLLER DRIVER @@ -19607,6 +19610,17 @@ S: Supported F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml F: drivers/i2c/busses/i2c-emev2.c +RENESAS ETHERNET AVB DRIVER +M: Paul Barker <paul.barker.ct@bp.renesas.com> +M: Niklas Söderlund <niklas.soderlund@ragnatech.se> +L: netdev@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/net/renesas,etheravb.yaml +F: drivers/net/ethernet/renesas/Kconfig +F: drivers/net/ethernet/renesas/Makefile +F: drivers/net/ethernet/renesas/ravb* + RENESAS ETHERNET SWITCH DRIVER R: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> L: netdev@vger.kernel.org @@ -19656,6 +19670,14 @@ F: Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml F: drivers/i2c/busses/i2c-rcar.c F: drivers/i2c/busses/i2c-sh_mobile.c +RENESAS R-CAR SATA DRIVER +M: Geert Uytterhoeven <geert+renesas@glider.be> +L: linux-ide@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml +F: drivers/ata/sata_rcar.c + RENESAS R-CAR THERMAL DRIVERS M: Niklas Söderlund <niklas.soderlund@ragnatech.se> L: linux-renesas-soc@vger.kernel.org @@ -19731,6 +19753,17 @@ S: Supported F: Documentation/devicetree/bindings/i2c/renesas,rzv2m.yaml F: drivers/i2c/busses/i2c-rzv2m.c +RENESAS SUPERH ETHERNET DRIVER +M: Niklas Söderlund <niklas.soderlund@ragnatech.se> +L: netdev@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/net/renesas,ether.yaml +F: drivers/net/ethernet/renesas/Kconfig +F: drivers/net/ethernet/renesas/Makefile +F: drivers/net/ethernet/renesas/sh_eth* +F: include/linux/sh_eth.h + RENESAS USB PHY DRIVER M: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> L: linux-renesas-soc@vger.kernel.org @@ -21653,6 +21686,15 @@ S: Supported W: https://github.com/thesofproject/linux/ F: sound/soc/sof/ +SOUND - GENERIC SOUND CARD (Simple-Audio-Card, Audio-Graph-Card) +M: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> +S: Supported +L: linux-sound@vger.kernel.org +F: sound/soc/generic/ +F: include/sound/simple_card* +F: Documentation/devicetree/bindings/sound/simple-card.yaml +F: Documentation/devicetree/bindings/sound/audio-graph*.yaml + SOUNDWIRE SUBSYSTEM M: Vinod 
Koul <vkoul@kernel.org> M: Bard Liao <yung-chuan.liao@linux.intel.com> @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 12 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc7 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fd9df6dcc593..70d7f4f20225 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2214,6 +2214,7 @@ config ARM64_SME bool "ARM Scalable Matrix Extension support" default y depends on ARM64_SVE + depends on BROKEN help The Scalable Matrix Extension (SME) is an extension to the AArch64 execution state which utilises a substantial subset of the SVE diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h index 9e39217b4afb..798d965760d4 100644 --- a/arch/arm64/include/asm/mman.h +++ b/arch/arm64/include/asm/mman.h @@ -6,6 +6,8 @@ #ifndef BUILD_VDSO #include <linux/compiler.h> +#include <linux/fs.h> +#include <linux/shmem_fs.h> #include <linux/types.h> static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, @@ -31,19 +33,21 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) -static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags) +static inline unsigned long arch_calc_vm_flag_bits(struct file *file, + unsigned long flags) { /* * Only allow MTE on anonymous mappings as these are guaranteed to be * backed by tags-capable memory. The vm_flags may be overridden by a * filesystem supporting MTE (RAM-based). */ - if (system_supports_mte() && (flags & MAP_ANONYMOUS)) + if (system_supports_mte() && + ((flags & MAP_ANONYMOUS) || shmem_file(file))) return VM_MTE_ALLOWED; return 0; } -#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags) +#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags) static inline bool arch_validate_prot(unsigned long prot, unsigned long addr __always_unused) diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 5fc3af9f8f29..341174bf9106 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -26,10 +26,6 @@ void update_freq_counters_refs(void); #define arch_scale_freq_invariant topology_scale_freq_invariant #define arch_scale_freq_ref topology_get_freq_ref -#ifdef CONFIG_ACPI_CPPC_LIB -#define arch_init_invariance_cppc topology_init_cpu_capacity_cppc -#endif - /* Replace task scheduler's default cpu-invariant accounting */ #define arch_scale_cpu_capacity topology_get_cpu_scale diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 77006df20a75..6d21971ae559 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -1367,6 +1367,7 @@ static void sve_init_regs(void) } else { fpsimd_to_sve(current); current->thread.fp_type = FP_STATE_SVE; + fpsimd_flush_task_state(current); } } diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S index 487381164ff6..2def9d0dd3dd 100644 --- a/arch/arm64/kernel/smccc-call.S +++ b/arch/arm64/kernel/smccc-call.S @@ -7,48 +7,19 @@ #include <asm/asm-offsets.h> #include <asm/assembler.h> -#include <asm/thread_info.h> - -/* - * If we have SMCCC v1.3 and (as is likely) no SVE state in - * the registers then set the SMCCC hint bit to say there's no - * need to preserve it. Do this by directly adjusting the SMCCC - * function value which is already stored in x0 ready to be called. 
- */ -SYM_FUNC_START(__arm_smccc_sve_check) - - ldr_l x16, smccc_has_sve_hint - cbz x16, 2f - - get_current_task x16 - ldr x16, [x16, #TSK_TI_FLAGS] - tbnz x16, #TIF_FOREIGN_FPSTATE, 1f // Any live FP state? - tbnz x16, #TIF_SVE, 2f // Does that state include SVE? - -1: orr x0, x0, ARM_SMCCC_1_3_SVE_HINT - -2: ret -SYM_FUNC_END(__arm_smccc_sve_check) -EXPORT_SYMBOL(__arm_smccc_sve_check) .macro SMCCC instr - stp x29, x30, [sp, #-16]! - mov x29, sp -alternative_if ARM64_SVE - bl __arm_smccc_sve_check -alternative_else_nop_endif \instr #0 - ldr x4, [sp, #16] + ldr x4, [sp] stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] - ldr x4, [sp, #24] + ldr x4, [sp, #8] cbz x4, 1f /* no quirk structure */ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS] cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6 b.ne 1f str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS] -1: ldp x29, x30, [sp], #16 - ret +1: ret .endm /* diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h index c6bce5fbff57..7f52bd31b9d4 100644 --- a/arch/loongarch/include/asm/kasan.h +++ b/arch/loongarch/include/asm/kasan.h @@ -25,6 +25,7 @@ /* 64-bit segment value. */ #define XKPRANGE_UC_SEG (0x8000) #define XKPRANGE_CC_SEG (0x9000) +#define XKPRANGE_WC_SEG (0xa000) #define XKVRANGE_VC_SEG (0xffff) /* Cached */ @@ -41,20 +42,28 @@ #define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT) #define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE) +/* WriteCombine */ +#define XKPRANGE_WC_START WRITECOMBINE_BASE +#define XKPRANGE_WC_SIZE XRANGE_SIZE +#define XKPRANGE_WC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END +#define XKPRANGE_WC_SHADOW_SIZE (XKPRANGE_WC_SIZE >> KASAN_SHADOW_SCALE_SHIFT) +#define XKPRANGE_WC_SHADOW_END (XKPRANGE_WC_KASAN_OFFSET + XKPRANGE_WC_SHADOW_SIZE) + /* VMALLOC (Cached or UnCached) */ #define XKVRANGE_VC_START MODULES_VADDR #define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE) -#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END +#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_WC_SHADOW_END #define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT) #define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE) /* KAsan shadow memory start right after vmalloc. */ #define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE) #define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET) -#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) +#define KASAN_SHADOW_END (round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1) #define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET) #define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET) +#define XKPRANGE_WC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET) #define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET) extern bool kasan_early_stage; diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h index e85df33f11c7..8f21567a3188 100644 --- a/arch/loongarch/include/asm/page.h +++ b/arch/loongarch/include/asm/page.h @@ -113,10 +113,7 @@ struct page *tlb_virt_to_page(unsigned long kaddr); extern int __virt_addr_valid(volatile void *kaddr); #define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr)) -#define VM_DATA_DEFAULT_FLAGS \ - (VM_READ | VM_WRITE | \ - ((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index f1a74b80f22c..382a09a7152c 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -58,48 +58,48 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) return ioremap_cache(phys, size); } -static int cpu_enumerated = 0; - #ifdef CONFIG_SMP -static int set_processor_mask(u32 id, u32 flags) +static int set_processor_mask(u32 id, u32 pass) { - int nr_cpus; - int cpu, cpuid = id; - - if (!cpu_enumerated) - nr_cpus = NR_CPUS; - else - nr_cpus = nr_cpu_ids; + int cpu = -1, cpuid = id; - if (num_processors >= nr_cpus) { + if (num_processors >= NR_CPUS) { pr_warn(PREFIX "nr_cpus limit of %i reached." - " processor 0x%x ignored.\n", nr_cpus, cpuid); + " processor 0x%x ignored.\n", NR_CPUS, cpuid); return -ENODEV; } + if (cpuid == loongson_sysconf.boot_cpu_id) cpu = 0; - else - cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS); - - if (!cpu_enumerated) - set_cpu_possible(cpu, true); - if (flags & ACPI_MADT_ENABLED) { + switch (pass) { + case 1: /* Pass 1 handle enabled processors */ + if (cpu < 0) + cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS); num_processors++; set_cpu_present(cpu, true); - __cpu_number_map[cpuid] = cpu; - __cpu_logical_map[cpu] = cpuid; - } else + break; + case 2: /* Pass 2 handle disabled processors */ + if (cpu < 0) + cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS); disabled_cpus++; + break; + default: + return cpu; + } + + set_cpu_possible(cpu, true); + __cpu_number_map[cpuid] = cpu; + __cpu_logical_map[cpu] = cpuid; return cpu; } #endif static int __init -acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end) +acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_core_pic *processor = NULL; @@ -110,13 +110,30 @@ acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long en acpi_table_print_madt_entry(&header->common); #ifdef CONFIG_SMP acpi_core_pic[processor->core_id] = *processor; - set_processor_mask(processor->core_id, processor->flags); + if (processor->flags & ACPI_MADT_ENABLED) + set_processor_mask(processor->core_id, 1); #endif return 0; } static int __init +acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_core_pic *processor = NULL; + + processor = (struct acpi_madt_core_pic *)header; + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + +#ifdef CONFIG_SMP + if (!(processor->flags & ACPI_MADT_ENABLED)) + set_processor_mask(processor->core_id, 2); +#endif + + return 0; +} +static int __init acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end) { static int core = 0; @@ -143,12 +160,14 @@ static void __init acpi_process_madt(void) } #endif acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC, - acpi_parse_processor, MAX_CORE_PIC); + acpi_parse_p1_processor, MAX_CORE_PIC); + + acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC, + acpi_parse_p2_processor, MAX_CORE_PIC); acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, acpi_parse_eio_master, MAX_IO_PICS); - cpu_enumerated = 1; loongson_sysconf.nr_cpus = num_processors; } @@ -310,6 +329,10 @@ static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) int nid; nid = 
acpi_get_node(handle); + + if (nid != NUMA_NO_NODE) + nid = early_cpu_to_node(cpu); + if (nid != NUMA_NO_NODE) { set_cpuid_to_node(physid, nid); node_set(nid, numa_nodes_parsed); @@ -324,12 +347,14 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu { int cpu; - cpu = set_processor_mask(physid, ACPI_MADT_ENABLED); - if (cpu < 0) { + cpu = cpu_number_map(physid); + if (cpu < 0 || cpu >= nr_cpu_ids) { pr_info(PREFIX "Unable to map lapic to logical cpu number\n"); - return cpu; + return -ERANGE; } + num_processors++; + set_cpu_present(cpu, true); acpi_map_cpu2node(handle, cpu, physid); *pcpu = cpu; diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index a5fc61f8b348..e5a39bbad078 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -51,11 +51,18 @@ static u64 paravt_steal_clock(int cpu) } #ifdef CONFIG_SMP +static struct smp_ops native_ops; + static void pv_send_ipi_single(int cpu, unsigned int action) { int min, old; irq_cpustat_t *info = &per_cpu(irq_stat, cpu); + if (unlikely(action == ACTION_BOOT_CPU)) { + native_ops.send_ipi_single(cpu, action); + return; + } + old = atomic_fetch_or(BIT(action), &info->message); if (old) return; @@ -75,6 +82,11 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) if (cpumask_empty(mask)) return; + if (unlikely(action == ACTION_BOOT_CPU)) { + native_ops.send_ipi_mask(mask, action); + return; + } + action = BIT(action); for_each_cpu(i, mask) { info = &per_cpu(irq_stat, i); @@ -147,6 +159,8 @@ static void pv_init_ipi(void) { int r, swi; + /* Init native ipi irq for ACTION_BOOT_CPU */ + native_ops.init_ipi(); swi = get_percpu_irq(INT_SWI0); if (swi < 0) panic("SWI0 IRQ mapping failed\n"); @@ -193,6 +207,7 @@ int __init pv_ipi_init(void) return 0; #ifdef CONFIG_SMP + native_ops = mp_ops; mp_ops.init_ipi = pv_init_ipi; mp_ops.send_ipi_single = pv_send_ipi_single; mp_ops.send_ipi_mask = pv_send_ipi_mask; diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 9afc2d8b3414..5d59e9ce2772 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -302,7 +302,7 @@ static void __init fdt_smp_setup(void) __cpu_number_map[cpuid] = cpu; __cpu_logical_map[cpu] = cpuid; - early_numa_add_cpu(cpu, 0); + early_numa_add_cpu(cpuid, 0); set_cpuid_to_node(cpuid, 0); } @@ -331,11 +331,11 @@ void __init loongson_prepare_cpus(unsigned int max_cpus) int i = 0; parse_acpi_topology(); + cpu_data[0].global_id = cpu_logical_map(0); for (i = 0; i < loongson_sysconf.nr_cpus; i++) { set_cpu_present(i, true); csr_mail_send(0, __cpu_logical_map[i], 0); - cpu_data[i].global_id = __cpu_logical_map[i]; } per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; @@ -380,6 +380,7 @@ void loongson_init_secondary(void) cpu_logical_map(cpu) / loongson_sysconf.cores_per_package; cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core : cpu_logical_map(cpu) % loongson_sysconf.cores_per_package; + cpu_data[cpu].global_id = cpu_logical_map(cpu); } void loongson_smp_finish(void) diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c index 427d6b1aec09..d2681272d8f0 100644 --- a/arch/loongarch/mm/kasan_init.c +++ b/arch/loongarch/mm/kasan_init.c @@ -13,6 +13,13 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); +#ifdef __PAGETABLE_P4D_FOLDED +#define __pgd_none(early, pgd) (0) +#else +#define __pgd_none(early, pgd) (early ? 
(pgd_val(pgd) == 0) : \ +(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d))) +#endif + #ifdef __PAGETABLE_PUD_FOLDED #define __p4d_none(early, p4d) (0) #else @@ -55,6 +62,9 @@ void *kasan_mem_to_shadow(const void *addr) case XKPRANGE_UC_SEG: offset = XKPRANGE_UC_SHADOW_OFFSET; break; + case XKPRANGE_WC_SEG: + offset = XKPRANGE_WC_SHADOW_OFFSET; + break; case XKVRANGE_VC_SEG: offset = XKVRANGE_VC_SHADOW_OFFSET; break; @@ -79,6 +89,8 @@ const void *kasan_shadow_to_mem(const void *shadow_addr) if (addr >= XKVRANGE_VC_SHADOW_OFFSET) return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START); + else if (addr >= XKPRANGE_WC_SHADOW_OFFSET) + return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START); else if (addr >= XKPRANGE_UC_SHADOW_OFFSET) return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START); else if (addr >= XKPRANGE_CC_SHADOW_OFFSET) @@ -142,6 +154,19 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, return pud_offset(p4dp, addr); } +static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early) +{ + if (__pgd_none(early, pgdp_get(pgdp))) { + phys_addr_t p4d_phys = early ? + __pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node); + if (!early) + memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d)); + pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys)); + } + + return p4d_offset(pgdp, addr); +} + static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, unsigned long end, int node, bool early) { @@ -178,19 +203,19 @@ static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr, do { next = pud_addr_end(addr, end); kasan_pmd_populate(pudp, addr, next, node, early); - } while (pudp++, addr = next, addr != end); + } while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp))); } static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr, unsigned long end, int node, bool early) { unsigned long next; - p4d_t *p4dp = p4d_offset(pgdp, addr); + p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early); do { next = p4d_addr_end(addr, end); kasan_pud_populate(p4dp, addr, next, node, early); - } while (p4dp++, addr = next, addr != end); + } while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp))); } static void __init kasan_pgd_populate(unsigned long addr, unsigned long end, @@ -218,7 +243,7 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end, asmlinkage void __init kasan_early_init(void) { BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE)); - BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE)); } static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval) @@ -233,7 +258,7 @@ static void __init clear_pgds(unsigned long start, unsigned long end) * swapper_pg_dir. pgd_clear() can't be used * here because it's nop on 2,3-level pagetable setups */ - for (; start < end; start += PGDIR_SIZE) + for (; start < end; start = pgd_addr_end(start, end)) kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0)); } @@ -243,6 +268,17 @@ void __init kasan_init(void) phys_addr_t pa_start, pa_end; /* + * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will + * overflow UINTPTR_MAX and then looks like a user space address. 
+ * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too + * large for Loongson-2K series whose cpu_vabits = 39. + */ + if (KASAN_SHADOW_END < vm_map_base) { + pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n"); + return; + } + + /* * PGD was populated as invalid_pmd_table or invalid_pud_table * in pagetable_init() which depends on how many levels of page * table you are using, but we had to clean the gpd of kasan diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h index 89b6beeda0b8..663f587dc789 100644 --- a/arch/parisc/include/asm/mman.h +++ b/arch/parisc/include/asm/mman.h @@ -2,6 +2,7 @@ #ifndef __ASM_MMAN_H__ #define __ASM_MMAN_H__ +#include <linux/fs.h> #include <uapi/asm/mman.h> /* PARISC cannot allow mdwe as it needs writable stacks */ @@ -11,7 +12,7 @@ static inline bool arch_memory_deny_write_exec_supported(void) } #define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported -static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags) +static inline unsigned long arch_calc_vm_flag_bits(struct file *file, unsigned long flags) { /* * The stack on parisc grows upwards, so if userspace requests memory @@ -23,6 +24,6 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags) return 0; } -#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags) +#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags) #endif /* __ASM_MMAN_H__ */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index ba0492f9de65..ad8dc4ccdaab 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4898,6 +4898,18 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, BOOK3S_INTERRUPT_EXTERNAL, 0); else lpcr |= LPCR_MER; + } else { + /* + * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit + * unexpectedly set - for e.g. during NMI handling when all register + * states are synchronized from L0 to L1. L1 needs to inform L0 about + * MER=1 only when there are pending external interrupts. + * In the above if check, MER bit is set if there are pending + * external interrupts. Hence, explicity mask off MER bit + * here as otherwise it may generate spurious interrupts in L2 KVM + * causing an endless loop, which results in L2 guest getting hung. 
+ */ + lpcr &= ~LPCR_MER; } } else if (vcpu->arch.pending_exceptions || vcpu->arch.doorbell_request || diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index aef70336d624..92f3664dd933 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h @@ -305,9 +305,4 @@ static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled extern void arch_scale_freq_tick(void); #define arch_scale_freq_tick arch_scale_freq_tick -#ifdef CONFIG_ACPI_CPPC_LIB -void init_freq_invariance_cppc(void); -#define arch_init_invariance_cppc init_freq_invariance_cppc -#endif - #endif /* _ASM_X86_TOPOLOGY_H */ diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c index 956984054bf3..aab9d0570841 100644 --- a/arch/x86/kernel/acpi/cppc.c +++ b/arch/x86/kernel/acpi/cppc.c @@ -110,7 +110,7 @@ static void amd_set_max_freq_ratio(void) static DEFINE_MUTEX(freq_invariance_lock); -void init_freq_invariance_cppc(void) +static inline void init_freq_invariance_cppc(void) { static bool init_done; @@ -127,6 +127,11 @@ void init_freq_invariance_cppc(void) mutex_unlock(&freq_invariance_lock); } +void acpi_processor_init_invariance_cppc(void) +{ + init_freq_invariance_cppc(); +} + /* * Get the highest performance register value. * @cpu: CPU from which to get highest performance. diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 2098dc689088..95c6beb8ce27 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2629,19 +2629,26 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - if (apic->apicv_active) { - /* irr_pending is always true when apicv is activated. */ - apic->irr_pending = true; + /* + * When APICv is enabled, KVM must always search the IRR for a pending + * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU + * isn't running. If APICv is disabled, KVM _should_ search the IRR + * for a pending IRQ. But KVM currently doesn't ensure *all* hardware, + * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching + * the IRR at this time could race with IRQ delivery from hardware that + * still sees APICv as being enabled. + * + * FIXME: Ensure other vCPUs and devices observe the change in APICv + * state prior to updating KVM's metadata caches, so that KVM + * can safely search the IRR and set irr_pending accordingly. + */ + apic->irr_pending = true; + + if (apic->apicv_active) apic->isr_count = 1; - } else { - /* - * Don't clear irr_pending, searching the IRR can race with - * updates from the CPU as APICv is still active from hardware's - * perspective. The flag will be cleared as appropriate when - * KVM injects the interrupt. - */ + else apic->isr_count = count_vectors(apic->regs + APIC_ISR); - } + apic->highest_isr_cache = -1; } diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 0b851ef937f2..fb854cf20ac3 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -450,8 +450,11 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp, goto e_free; /* This needs to happen after SEV/SNP firmware initialization. 
*/ - if (vm_type == KVM_X86_SNP_VM && snp_guest_req_init(kvm)) - goto e_free; + if (vm_type == KVM_X86_SNP_VM) { + ret = snp_guest_req_init(kvm); + if (ret) + goto e_free; + } INIT_LIST_HEAD(&sev->regions_list); INIT_LIST_HEAD(&sev->mirror_vms); @@ -2212,10 +2215,6 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) if (sev->snp_context) return -EINVAL; - sev->snp_context = snp_context_create(kvm, argp); - if (!sev->snp_context) - return -ENOTTY; - if (params.flags) return -EINVAL; @@ -2230,6 +2229,10 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) if (params.policy & SNP_POLICY_MASK_SINGLE_SOCKET) return -EINVAL; + sev->snp_context = snp_context_create(kvm, argp); + if (!sev->snp_context) + return -ENOTTY; + start.gctx_paddr = __psp_pa(sev->snp_context); start.policy = params.policy; memcpy(start.gosvw, params.gosvw, sizeof(params.gosvw)); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index a8e7bc04d9bf..931a7361c30f 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1197,11 +1197,14 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, kvm_hv_nested_transtion_tlb_flush(vcpu, enable_ept); /* - * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings - * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a - * full TLB flush from the guest's perspective. This is required even - * if VPID is disabled in the host as KVM may need to synchronize the - * MMU in response to the guest TLB flush. + * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the + * same VPID as the host, and so architecturally, linear and combined + * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit. KVM + * emulates L2 sharing L1's VPID=0 by using vpid01 while running L2, + * and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01. This + * is required if VPID is disabled in KVM, as a TLB flush (there are no + * VPIDs) still occurs from L1's perspective, and KVM may need to + * synchronize the MMU in response to the guest TLB flush. * * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use. * EPT is a special snowflake, as guest-physical mappings aren't @@ -2315,6 +2318,17 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); + /* + * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the + * same VPID as the host. Emulate this behavior by using vpid01 for L2 + * if VPID is disabled in vmcs12. Note, if VPID is disabled, VM-Enter + * and VM-Exit are architecturally required to flush VPID=0, but *only* + * VPID=0. I.e. using vpid02 would be ok (so long as KVM emulates the + * required flushes), but doing so would cause KVM to over-flush. E.g. + * if L1 runs L2 X with VPID12=1, then runs L2 Y with VPID12 disabled, + * and then runs L2 X again, then KVM can and should retain TLB entries + * for VPID12=1. + */ if (enable_vpid) { if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); @@ -5950,6 +5964,12 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); + /* + * Always flush the effective vpid02, i.e. never flush the current VPID + * and never explicitly flush vpid01. INVVPID targets a VPID, not a + * VMCS, and so whether or not the current vmcs12 has VPID enabled is + * irrelevant (and there may not be a loaded vmcs12). 
+ */ vpid02 = nested_get_vpid02(vcpu); switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 81ed596e4454..d28618e9277e 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -217,9 +217,11 @@ module_param(ple_window_shrink, uint, 0444); static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; module_param(ple_window_max, uint, 0444); -/* Default is SYSTEM mode, 1 for host-guest mode */ +/* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */ int __read_mostly pt_mode = PT_MODE_SYSTEM; +#ifdef CONFIG_BROKEN module_param(pt_mode, int, S_IRUGO); +#endif struct x86_pmu_lbr __ro_after_init vmx_lbr_caps; @@ -3216,7 +3218,7 @@ void vmx_flush_tlb_all(struct kvm_vcpu *vcpu) static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu) { - if (is_guest_mode(vcpu)) + if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu))) return nested_get_vpid02(vcpu); return to_vmx(vcpu)->vpid; } diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 1a40f0514eaa..5c0cc7aae872 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -671,10 +671,6 @@ static int pcc_data_alloc(int pcc_ss_id) * ) */ -#ifndef arch_init_invariance_cppc -static inline void arch_init_invariance_cppc(void) { } -#endif - /** * acpi_cppc_processor_probe - Search for per CPU _CPC objects. * @pr: Ptr to acpi_processor containing this CPU's logical ID. @@ -905,8 +901,6 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) goto out_free; } - arch_init_invariance_cppc(); - kfree(output.pointer); return 0; diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index cb52dd000b95..3b281bc1e73c 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -237,6 +237,9 @@ static struct notifier_block acpi_processor_notifier_block = { .notifier_call = acpi_processor_notifier, }; +void __weak acpi_processor_init_invariance_cppc(void) +{ } + /* * We keep the driver loaded even when ACPI is not running. 
* This is needed for the powernow-k8 driver, that works even without @@ -270,6 +273,12 @@ static int __init acpi_processor_driver_init(void) NULL, acpi_soft_cpu_dead); acpi_processor_throttling_init(); + + /* + * Frequency invariance calculations on AMD platforms can't be run until + * after acpi_cppc_processor_probe() has been called for all online CPUs + */ + acpi_processor_init_invariance_cppc(); return 0; err: driver_unregister(&acpi_processor_driver); diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 75fcb75d5515..3ebe77566788 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -366,7 +366,7 @@ void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate) #ifdef CONFIG_ACPI_CPPC_LIB #include <acpi/cppc_acpi.h> -void topology_init_cpu_capacity_cppc(void) +static inline void topology_init_cpu_capacity_cppc(void) { u64 capacity, capacity_scale = 0; struct cppc_perf_caps perf_caps; @@ -417,6 +417,10 @@ void topology_init_cpu_capacity_cppc(void) exit: free_raw_capacity(); } +void acpi_processor_init_invariance_cppc(void) +{ + topology_init_cpu_capacity_cppc(); +} #endif #ifdef CONFIG_CPU_FREQ diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 438b92967bc3..30a32ebbcc68 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -3288,13 +3288,12 @@ static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb) case INTEL_TLV_TEST_EXCEPTION: /* Generate devcoredump from exception */ if (!hci_devcd_init(hdev, skb->len)) { - hci_devcd_append(hdev, skb); + hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC)); hci_devcd_complete(hdev); } else { bt_dev_err(hdev, "Failed to generate devcoredump"); - kfree_skb(skb); } - return 0; + break; default: bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]); } diff --git a/drivers/char/tpm/tpm-buf.c b/drivers/char/tpm/tpm-buf.c index cad0048bcc3c..e49a19fea3bd 100644 --- a/drivers/char/tpm/tpm-buf.c +++ b/drivers/char/tpm/tpm-buf.c @@ -147,6 +147,26 @@ void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value) EXPORT_SYMBOL_GPL(tpm_buf_append_u32); /** + * tpm_buf_append_handle() - Add a handle + * @chip: &tpm_chip instance + * @buf: &tpm_buf instance + * @handle: a TPM object handle + * + * Add a handle to the buffer, and increase the count tracking the number of + * handles in the command buffer. Works only for command buffers. 
+ */ +void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle) +{ + if (buf->flags & TPM_BUF_TPM2B) { + dev_err(&chip->dev, "Invalid buffer type (TPM2B)\n"); + return; + } + + tpm_buf_append_u32(buf, handle); + buf->handles++; +} + +/** * tpm_buf_read() - Read from a TPM buffer * @buf: &tpm_buf instance * @offset: offset within the buffer diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 1e856259219e..dfdcbd009720 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -14,6 +14,10 @@ #include "tpm.h" #include <crypto/hash_info.h> +static bool disable_pcr_integrity; +module_param(disable_pcr_integrity, bool, 0444); +MODULE_PARM_DESC(disable_pcr_integrity, "Disable integrity protection of TPM2_PCR_Extend"); + static struct tpm2_hash tpm2_hash_map[] = { {HASH_ALGO_SHA1, TPM_ALG_SHA1}, {HASH_ALGO_SHA256, TPM_ALG_SHA256}, @@ -232,18 +236,26 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, int rc; int i; - rc = tpm2_start_auth_session(chip); - if (rc) - return rc; + if (!disable_pcr_integrity) { + rc = tpm2_start_auth_session(chip); + if (rc) + return rc; + } rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND); if (rc) { - tpm2_end_auth_session(chip); + if (!disable_pcr_integrity) + tpm2_end_auth_session(chip); return rc; } - tpm_buf_append_name(chip, &buf, pcr_idx, NULL); - tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0); + if (!disable_pcr_integrity) { + tpm_buf_append_name(chip, &buf, pcr_idx, NULL); + tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0); + } else { + tpm_buf_append_handle(chip, &buf, pcr_idx); + tpm_buf_append_auth(chip, &buf, 0, NULL, 0); + } tpm_buf_append_u32(&buf, chip->nr_allocated_banks); @@ -253,9 +265,11 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, chip->allocated_banks[i].digest_size); } - tpm_buf_fill_hmac_session(chip, &buf); + if (!disable_pcr_integrity) + tpm_buf_fill_hmac_session(chip, &buf); rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value"); - rc = tpm_buf_check_hmac_response(chip, &buf, rc); + if (!disable_pcr_integrity) + rc = tpm_buf_check_hmac_response(chip, &buf, rc); tpm_buf_destroy(&buf); diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c index 0739830904b2..b0f13c8ea79c 100644 --- a/drivers/char/tpm/tpm2-sessions.c +++ b/drivers/char/tpm/tpm2-sessions.c @@ -237,9 +237,7 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, #endif if (!tpm2_chip_auth(chip)) { - tpm_buf_append_u32(buf, handle); - /* count the number of handles in the upper bits of flags */ - buf->handles++; + tpm_buf_append_handle(chip, buf, handle); return; } @@ -272,6 +270,31 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, } EXPORT_SYMBOL_GPL(tpm_buf_append_name); +void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf, + u8 attributes, u8 *passphrase, int passphrase_len) +{ + /* offset tells us where the sessions area begins */ + int offset = buf->handles * 4 + TPM_HEADER_SIZE; + u32 len = 9 + passphrase_len; + + if (tpm_buf_length(buf) != offset) { + /* not the first session so update the existing length */ + len += get_unaligned_be32(&buf->data[offset]); + put_unaligned_be32(len, &buf->data[offset]); + } else { + tpm_buf_append_u32(buf, len); + } + /* auth handle */ + tpm_buf_append_u32(buf, TPM2_RS_PW); + /* nonce */ + tpm_buf_append_u16(buf, 0); + /* attributes */ + tpm_buf_append_u8(buf, 0); + /* passphrase */ + tpm_buf_append_u16(buf, passphrase_len); + 
tpm_buf_append(buf, passphrase, passphrase_len); +} + /** * tpm_buf_append_hmac_session() - Append a TPM session element * @chip: the TPM chip structure @@ -309,26 +332,8 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, #endif if (!tpm2_chip_auth(chip)) { - /* offset tells us where the sessions area begins */ - int offset = buf->handles * 4 + TPM_HEADER_SIZE; - u32 len = 9 + passphrase_len; - - if (tpm_buf_length(buf) != offset) { - /* not the first session so update the existing length */ - len += get_unaligned_be32(&buf->data[offset]); - put_unaligned_be32(len, &buf->data[offset]); - } else { - tpm_buf_append_u32(buf, len); - } - /* auth handle */ - tpm_buf_append_u32(buf, TPM2_RS_PW); - /* nonce */ - tpm_buf_append_u16(buf, 0); - /* attributes */ - tpm_buf_append_u8(buf, 0); - /* passphrase */ - tpm_buf_append_u16(buf, passphrase_len); - tpm_buf_append(buf, passphrase, passphrase_len); + tpm_buf_append_auth(chip, buf, attributes, passphrase, + passphrase_len); return; } @@ -948,10 +953,13 @@ static int tpm2_load_null(struct tpm_chip *chip, u32 *null_key) /* Deduce from the name change TPM interference: */ dev_err(&chip->dev, "null key integrity check failed\n"); tpm2_flush_context(chip, tmp_null_key); - chip->flags |= TPM_CHIP_FLAG_DISABLE; err: - return rc ? -ENODEV : 0; + if (rc) { + chip->flags |= TPM_CHIP_FLAG_DISABLE; + rc = -ENODEV; + } + return rc; } /** diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index f9105443d7db..be9bee6ab65f 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -40,7 +40,7 @@ #define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL]) # define PLL_POST_DIV_SHIFT 8 -# define PLL_POST_DIV_MASK(p) GENMASK((p)->width - 1, 0) +# define PLL_POST_DIV_MASK(p) GENMASK((p)->width ? 
(p)->width - 1 : 3, 0) # define PLL_ALPHA_MSB BIT(15) # define PLL_ALPHA_EN BIT(24) # define PLL_ALPHA_MODE BIT(25) diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c index 0f578771071f..8ea25aa25dff 100644 --- a/drivers/clk/qcom/gcc-x1e80100.c +++ b/drivers/clk/qcom/gcc-x1e80100.c @@ -3123,7 +3123,7 @@ static struct clk_branch gcc_pcie_3_pipe_clk = { static struct clk_branch gcc_pcie_3_pipediv2_clk = { .halt_reg = 0x58060, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52020, .enable_mask = BIT(5), @@ -3248,7 +3248,7 @@ static struct clk_branch gcc_pcie_4_pipe_clk = { static struct clk_branch gcc_pcie_4_pipediv2_clk = { .halt_reg = 0x6b054, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52010, .enable_mask = BIT(27), @@ -3373,7 +3373,7 @@ static struct clk_branch gcc_pcie_5_pipe_clk = { static struct clk_branch gcc_pcie_5_pipediv2_clk = { .halt_reg = 0x2f054, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52018, .enable_mask = BIT(19), @@ -3511,7 +3511,7 @@ static struct clk_branch gcc_pcie_6a_pipe_clk = { static struct clk_branch gcc_pcie_6a_pipediv2_clk = { .halt_reg = 0x31060, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52018, .enable_mask = BIT(28), @@ -3649,7 +3649,7 @@ static struct clk_branch gcc_pcie_6b_pipe_clk = { static struct clk_branch gcc_pcie_6b_pipediv2_clk = { .halt_reg = 0x8d060, - .halt_check = BRANCH_HALT_VOTED, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52010, .enable_mask = BIT(28), @@ -6155,7 +6155,7 @@ static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = { .pd = { .name = "gcc_usb3_mp_ss1_phy_gdsc", }, - .pwrsts = PWRSTS_OFF_ON, + .pwrsts = PWRSTS_RET_ON, .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE, }; diff --git a/drivers/clk/qcom/videocc-sm8350.c b/drivers/clk/qcom/videocc-sm8350.c index 5bd6fe3e1298..874d4da95ff8 100644 --- a/drivers/clk/qcom/videocc-sm8350.c +++ b/drivers/clk/qcom/videocc-sm8350.c @@ -452,7 +452,7 @@ static struct gdsc mvs0_gdsc = { .pd = { .name = "mvs0_gdsc", }, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE, .pwrsts = PWRSTS_OFF_ON, }; @@ -461,7 +461,7 @@ static struct gdsc mvs1_gdsc = { .pd = { .name = "mvs1_gdsc", }, - .flags = HW_CTRL | RETAIN_FF_ENABLE, + .flags = HW_CTRL_TRIGGER | RETAIN_FF_ENABLE, .pwrsts = PWRSTS_OFF_ON, }; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index b0018f371ea3..400337f3b572 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1028,26 +1028,29 @@ static void hybrid_update_cpu_capacity_scaling(void) } } -static void __hybrid_init_cpu_capacity_scaling(void) +static void __hybrid_refresh_cpu_capacity_scaling(void) { hybrid_max_perf_cpu = NULL; hybrid_update_cpu_capacity_scaling(); } -static void hybrid_init_cpu_capacity_scaling(void) +static void hybrid_refresh_cpu_capacity_scaling(void) { - bool disable_itmt = false; + guard(mutex)(&hybrid_capacity_lock); - mutex_lock(&hybrid_capacity_lock); + __hybrid_refresh_cpu_capacity_scaling(); +} +static void hybrid_init_cpu_capacity_scaling(bool refresh) +{ /* * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity * scaling has been enabled already and the driver is just changing the * operation mode. 
*/ - if (hybrid_max_perf_cpu) { - __hybrid_init_cpu_capacity_scaling(); - goto unlock; + if (refresh) { + hybrid_refresh_cpu_capacity_scaling(); + return; } /* @@ -1056,19 +1059,25 @@ static void hybrid_init_cpu_capacity_scaling(void) * do not do that when SMT is in use. */ if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) { - __hybrid_init_cpu_capacity_scaling(); - disable_itmt = true; + hybrid_refresh_cpu_capacity_scaling(); + /* + * Disabling ITMT causes sched domains to be rebuilt to disable asym + * packing and enable asym capacity. + */ + sched_clear_itmt_support(); } +} -unlock: - mutex_unlock(&hybrid_capacity_lock); +static bool hybrid_clear_max_perf_cpu(void) +{ + bool ret; - /* - * Disabling ITMT causes sched domains to be rebuilt to disable asym - * packing and enable asym capacity. - */ - if (disable_itmt) - sched_clear_itmt_support(); + guard(mutex)(&hybrid_capacity_lock); + + ret = !!hybrid_max_perf_cpu; + hybrid_max_perf_cpu = NULL; + + return ret; } static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) @@ -1392,7 +1401,7 @@ static void intel_pstate_update_limits_for_all(void) mutex_lock(&hybrid_capacity_lock); if (hybrid_max_perf_cpu) - __hybrid_init_cpu_capacity_scaling(); + __hybrid_refresh_cpu_capacity_scaling(); mutex_unlock(&hybrid_capacity_lock); } @@ -2263,6 +2272,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) } else { cpu->pstate.scaling = perf_ctl_scaling; } + /* + * If the CPU is going online for the first time and it was + * offline initially, asym capacity scaling needs to be updated. + */ + hybrid_update_capacity(cpu); } else { cpu->pstate.scaling = perf_ctl_scaling; cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu); @@ -3352,6 +3366,7 @@ static void intel_pstate_driver_cleanup(void) static int intel_pstate_register_driver(struct cpufreq_driver *driver) { + bool refresh_cpu_cap_scaling; int ret; if (driver == &intel_pstate) @@ -3364,6 +3379,8 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) arch_set_max_freq_ratio(global.turbo_disabled); + refresh_cpu_cap_scaling = hybrid_clear_max_perf_cpu(); + intel_pstate_driver = driver; ret = cpufreq_register_driver(intel_pstate_driver); if (ret) { @@ -3373,7 +3390,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) global.min_perf_pct = min_perf_pct_min(); - hybrid_init_cpu_capacity_scaling(); + hybrid_init_cpu_capacity_scaling(refresh_cpu_cap_scaling); return 0; } diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c index d670635914ec..a74600d9f2d7 100644 --- a/drivers/firmware/smccc/smccc.c +++ b/drivers/firmware/smccc/smccc.c @@ -16,7 +16,6 @@ static u32 smccc_version = ARM_SMCCC_VERSION_1_0; static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE; bool __ro_after_init smccc_trng_available = false; -u64 __ro_after_init smccc_has_sve_hint = false; s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED; s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED; @@ -28,9 +27,6 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit) smccc_conduit = conduit; smccc_trng_available = smccc_probe_trng(); - if (IS_ENABLED(CONFIG_ARM64_SVE) && - smccc_version >= ARM_SMCCC_VERSION_1_3) - smccc_has_sve_hint = true; if ((smccc_version >= ARM_SMCCC_VERSION_1_2) && (smccc_conduit != SMCCC_CONDUIT_NONE)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 1f5a296f5ed2..7dd55ed57c1d 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -172,8 +172,8 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif, &buffer); obj = (union acpi_object *)buffer.pointer; - /* Fail if calling the method fails and ATIF is supported */ - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { + /* Fail if calling the method fails */ + if (ACPI_FAILURE(status)) { DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n", acpi_format_exception(status)); kfree(obj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index cbef720de779..9da4414de617 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -402,7 +402,7 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz int r; uint32_t *data, x; - if (size & 0x3 || *pos & 0x3) + if (size > 4096 || size & 0x3 || *pos & 0x3) return -EINVAL; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); @@ -1648,7 +1648,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { ent = debugfs_create_file(debugfs_regs_names[i], - S_IFREG | 0444, root, + S_IFREG | 0400, root, adev, debugfs_regs[i]); if (!i && !IS_ERR_OR_NULL(ent)) i_size_write(ent->d_inode, adev->rmmio_size); @@ -2100,11 +2100,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) amdgpu_securedisplay_debugfs_init(adev); amdgpu_fw_attestation_debugfs_init(adev); - debugfs_create_file("amdgpu_evict_vram", 0444, root, adev, + debugfs_create_file("amdgpu_evict_vram", 0400, root, adev, &amdgpu_evict_vram_fops); - debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev, + debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev, &amdgpu_evict_gtt_fops); - debugfs_create_file("amdgpu_test_ib", 0444, root, adev, + debugfs_create_file("amdgpu_test_ib", 0400, root, adev, &amdgpu_debugfs_test_ib_fops); debugfs_create_file("amdgpu_vm_info", 0444, root, adev, &amdgpu_debugfs_vm_info_fops); diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 5e8833e4fed2..ccfd2a4b4acc 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -482,7 +482,7 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, case AMDGPU_SPX_PARTITION_MODE: return adev->gmc.num_mem_partitions == 1 && num_xcc > 0; case AMDGPU_DPX_PARTITION_MODE: - return adev->gmc.num_mem_partitions != 8 && (num_xcc % 4) == 0; + return adev->gmc.num_mem_partitions <= 2 && (num_xcc % 4) == 0; case AMDGPU_TPX_PARTITION_MODE: return (adev->gmc.num_mem_partitions == 1 || adev->gmc.num_mem_partitions == 3) && diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 13421a58210d..07e9ce99694f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9429,6 +9429,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, bool mode_set_reset_required = false; u32 i; struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; + bool set_backlight_level = false; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -9548,6 +9549,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; 
mode_set_reset_required = true; + set_backlight_level = true; } else if (modereset_required(new_crtc_state)) { drm_dbg_atomic(dev, "Atomic commit: RESET. crtc id %d:[%p]\n", @@ -9599,6 +9601,19 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, acrtc->otg_inst = status->primary_otg_inst; } } + + /* During boot up and resume the DC layer will reset the panel brightness + * to fix a flicker issue. + * It will cause the dm->actual_brightness is not the current panel brightness + * level. (the dm->brightness is the correct panel level) + * So we set the backlight level with dm->brightness value after set mode + */ + if (set_backlight_level) { + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + } } static void dm_set_writeback(struct amdgpu_display_manager *dm, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 0d8498ab9b23..be8fbb04ad98 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -3127,7 +3127,9 @@ static enum bp_result bios_parser_get_vram_info( struct atom_data_revision revision; // vram info moved to umc_info for DCN4x - if (info && DATA_TABLES(umc_info)) { + if (dcb->ctx->dce_version >= DCN_VERSION_4_01 && + dcb->ctx->dce_version < DCN_VERSION_MAX && + info && DATA_TABLES(umc_info)) { header = GET_IMAGE(struct atom_common_table_header, DATA_TABLES(umc_info)); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 80e60ea2d11e..ee1bcfaae3e3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1259,26 +1259,33 @@ static int smu_sw_init(void *handle) smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + smu->user_dpm_profile.user_workload_mask = 0; atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; - smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; - smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; - smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; - smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; - smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; - smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; + smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; + smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; + smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; + smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3; + smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4; + smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; + smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; if (smu->is_apu || - !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; - else - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) { + smu->driver_workload_mask = + 1 << 
smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + } else { + smu->driver_workload_mask = + 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; + } + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; @@ -2348,17 +2355,20 @@ static int smu_switch_power_profile(void *handle, return -EINVAL; if (!en) { - smu->workload_mask &= ~(1 << smu->workload_prority[type]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } else { - smu->workload_mask |= (1 << smu->workload_prority[type]); + smu->driver_workload_mask |= (1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } + smu->workload_mask = smu->driver_workload_mask | + smu->user_dpm_profile.user_workload_mask; + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) smu_bump_power_profile_mode(smu, workload, 0); @@ -3049,12 +3059,23 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; + int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - return smu_bump_power_profile_mode(smu, param, param_size); + if (smu->user_dpm_profile.user_workload_mask & + (1 << smu->workload_priority[param[param_size]])) + return 0; + + smu->user_dpm_profile.user_workload_mask = + (1 << smu->workload_priority[param[param_size]]); + smu->workload_mask = smu->user_dpm_profile.user_workload_mask | + smu->driver_workload_mask; + ret = smu_bump_power_profile_mode(smu, param, param_size); + + return ret; } static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index b44a185d07e8..d60d9a12a47e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -240,6 +240,7 @@ struct smu_user_dpm_profile { /* user clock state information */ uint32_t clk_mask[SMU_CLK_COUNT]; uint32_t clk_dependency; + uint32_t user_workload_mask; }; #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ @@ -557,7 +558,8 @@ struct smu_context { bool disable_uclk_switch; uint32_t workload_mask; - uint32_t workload_prority[WORKLOAD_POLICY_MAX]; + uint32_t driver_workload_mask; + uint32_t workload_priority[WORKLOAD_POLICY_MAX]; uint32_t workload_setting[WORKLOAD_POLICY_MAX]; uint32_t power_profile_mode; uint32_t default_power_profile_mode; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index c0f6b59369b7..31fe512028f4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1455,7 +1455,6 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, return -EINVAL; } - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && (smu->smc_fw_version >= 0x360d00)) { if (size != 10) @@ -1523,14 +1522,14 @@ static int 
arcturus_set_power_profile_mode(struct smu_context *smu, ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 16af1a329621..12223f507977 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2081,10 +2081,13 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 9c3c48297cba..3b7b2ec8319a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1786,10 +1786,13 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + else + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 1fe020f1f4db..952ee22cbc90 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -1079,7 +1079,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", @@ -1087,7 +1087,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index cc0504b063fa..62316a6707ef 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -890,14 +890,14 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u } ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + smu_cmn_assign_power_profile(smu); return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index d53e162dcd8d..5dd7ceca64fe 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2485,7 +2485,7 @@ static 
int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); int workload_type, ret = 0; - u32 workload_mask, selected_workload_mask; + u32 workload_mask; smu->power_profile_mode = input[size]; @@ -2552,7 +2552,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - selected_workload_mask = workload_mask = 1 << workload_type; + workload_mask = 1 << workload_type; /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && @@ -2567,12 +2567,22 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, workload_mask |= 1 << workload_type; } + smu->workload_mask |= workload_mask; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - workload_mask, + smu->workload_mask, NULL); - if (!ret) - smu->workload_mask = selected_workload_mask; + if (!ret) { + smu_cmn_assign_power_profile(smu); + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_FULLSCREEN3D); + smu->power_profile_mode = smu->workload_mask & (1 << workload_type) + ? PP_SMC_POWER_PROFILE_FULLSCREEN3D + : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + } + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index b891a5e0a396..9d0b19419de0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2499,13 +2499,14 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); else - smu->workload_mask = (1 << workload_type); + smu_cmn_assign_power_profile(smu); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index e83ea2bc7f9c..1aa13d32ceb2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -367,54 +367,6 @@ static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu) return 0; } -#ifndef atom_smc_dpm_info_table_14_0_0 -struct atom_smc_dpm_info_table_14_0_0 { - struct atom_common_table_header table_header; - BoardTable_t BoardTable; -}; -#endif - -static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu) -{ - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *smc_pptable = table_context->driver_pptable; - struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table; - BoardTable_t *BoardTable = &smc_pptable->BoardTable; - int index, ret; - - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - smc_dpm_info); - - ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, - (uint8_t **)&smc_dpm_table); - if (ret) - return ret; - - memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); - - return 0; -} - -#if 0 -static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, - void **table, - uint32_t *size) -{ - struct smu_table_context *smu_table = 
&smu->smu_table; - void *combo_pptable = smu_table->combo_pptable; - int ret = 0; - - ret = smu_cmn_get_combo_pptable(smu); - if (ret) - return ret; - - *table = combo_pptable; - *size = sizeof(struct smu_14_0_powerplay_table); - - return 0; -} -#endif - static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, void **table, uint32_t *size) @@ -436,16 +388,12 @@ static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, static int smu_v14_0_2_setup_pptable(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - struct amdgpu_device *adev = smu->adev; int ret = 0; if (amdgpu_sriov_vf(smu->adev)) return 0; - if (!adev->scpm_enabled) - ret = smu_v14_0_setup_pptable(smu); - else - ret = smu_v14_0_2_get_pptable_from_pmfw(smu, + ret = smu_v14_0_2_get_pptable_from_pmfw(smu, &smu_table->power_play_table, &smu_table->power_play_table_size); if (ret) @@ -455,16 +403,6 @@ static int smu_v14_0_2_setup_pptable(struct smu_context *smu) if (ret) return ret; - /* - * With SCPM enabled, the operation below will be handled - * by PSP. Driver involvment is unnecessary and useless. - */ - if (!adev->scpm_enabled) { - ret = smu_v14_0_2_append_powerplay_table(smu); - if (ret) - return ret; - } - ret = smu_v14_0_2_check_powerplay_table(smu); if (ret) return ret; @@ -1869,12 +1807,11 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - 1 << workload_type, - NULL); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + smu->workload_mask, NULL); + if (!ret) - smu->workload_mask = 1 << workload_type; + smu_cmn_assign_power_profile(smu); return ret; } @@ -2799,7 +2736,6 @@ static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .setup_pptable = smu_v14_0_2_setup_pptable, .check_fw_version = smu_v14_0_check_fw_version, - .write_pptable = smu_cmn_write_pptable, .set_driver_table_location = smu_v14_0_set_driver_table_location, .system_features_control = smu_v14_0_system_features_control, .set_allowed_mask = smu_v14_0_set_allowed_mask, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 91ad434bcdae..bdfc5e617333 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -1138,6 +1138,14 @@ int smu_cmn_set_mp1_state(struct smu_context *smu, return ret; } +void smu_cmn_assign_power_profile(struct smu_context *smu) +{ + uint32_t index; + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; +} + bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) { struct pci_dev *p = NULL; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 1de685defe85..8a801e389659 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -130,6 +130,8 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); int smu_cmn_set_mp1_state(struct smu_context *smu, enum pp_mp1_state mp1_state); +void smu_cmn_assign_power_profile(struct smu_context *smu); + /* * Helper function to make sysfs_emit_at() happy. Align buf to * the current page boundary and record the offset. 
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 0830cae9a4d0..2d84d7ea1ab7 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -403,7 +403,6 @@ static const struct dmi_system_id orientation_data[] = { }, { /* Lenovo Yoga Tab 3 X90F */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, .driver_data = (void *)&lcd1600x2560_rightside_up, diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c index eded5e955cc0..4cb3494c0bb2 100644 --- a/drivers/gpu/drm/imagination/pvr_context.c +++ b/drivers/gpu/drm/imagination/pvr_context.c @@ -17,10 +17,14 @@ #include <drm/drm_auth.h> #include <drm/drm_managed.h> + +#include <linux/bug.h> #include <linux/errno.h> #include <linux/kernel.h> +#include <linux/list.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/xarray.h> @@ -354,6 +358,10 @@ int pvr_context_create(struct pvr_file *pvr_file, struct drm_pvr_ioctl_create_co return err; } + spin_lock(&pvr_dev->ctx_list_lock); + list_add_tail(&ctx->file_link, &pvr_file->contexts); + spin_unlock(&pvr_dev->ctx_list_lock); + return 0; err_destroy_fw_obj: @@ -380,6 +388,11 @@ pvr_context_release(struct kref *ref_count) container_of(ref_count, struct pvr_context, ref_count); struct pvr_device *pvr_dev = ctx->pvr_dev; + WARN_ON(in_interrupt()); + spin_lock(&pvr_dev->ctx_list_lock); + list_del(&ctx->file_link); + spin_unlock(&pvr_dev->ctx_list_lock); + xa_erase(&pvr_dev->ctx_ids, ctx->ctx_id); pvr_context_destroy_queues(ctx); pvr_fw_object_destroy(ctx->fw_obj); @@ -437,11 +450,30 @@ pvr_context_destroy(struct pvr_file *pvr_file, u32 handle) */ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file) { + struct pvr_device *pvr_dev = pvr_file->pvr_dev; struct pvr_context *ctx; unsigned long handle; xa_for_each(&pvr_file->ctx_handles, handle, ctx) pvr_context_destroy(pvr_file, handle); + + spin_lock(&pvr_dev->ctx_list_lock); + ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link); + + while (!list_entry_is_head(ctx, &pvr_file->contexts, file_link)) { + list_del_init(&ctx->file_link); + + if (pvr_context_get_if_referenced(ctx)) { + spin_unlock(&pvr_dev->ctx_list_lock); + + pvr_vm_unmap_all(ctx->vm_ctx); + + pvr_context_put(ctx); + spin_lock(&pvr_dev->ctx_list_lock); + } + ctx = list_first_entry(&pvr_file->contexts, struct pvr_context, file_link); + } + spin_unlock(&pvr_dev->ctx_list_lock); } /** @@ -451,6 +483,7 @@ void pvr_destroy_contexts_for_file(struct pvr_file *pvr_file) void pvr_context_device_init(struct pvr_device *pvr_dev) { xa_init_flags(&pvr_dev->ctx_ids, XA_FLAGS_ALLOC1); + spin_lock_init(&pvr_dev->ctx_list_lock); } /** diff --git a/drivers/gpu/drm/imagination/pvr_context.h b/drivers/gpu/drm/imagination/pvr_context.h index 0c7b97dfa6ba..07afa179cdf4 100644 --- a/drivers/gpu/drm/imagination/pvr_context.h +++ b/drivers/gpu/drm/imagination/pvr_context.h @@ -85,6 +85,9 @@ struct pvr_context { /** @compute: Transfer queue. */ struct pvr_queue *transfer; } queues; + + /** @file_link: pvr_file PVR context list link. 
*/ + struct list_head file_link; }; static __always_inline struct pvr_queue * @@ -124,6 +127,24 @@ pvr_context_get(struct pvr_context *ctx) } /** + * pvr_context_get_if_referenced() - Take an additional reference on a still + * referenced context. + * @ctx: Context pointer. + * + * Call pvr_context_put() to release. + * + * Returns: + * * True on success, or + * * false if no context pointer passed, or the context wasn't still + * * referenced. + */ +static __always_inline bool +pvr_context_get_if_referenced(struct pvr_context *ctx) +{ + return ctx != NULL && kref_get_unless_zero(&ctx->ref_count) != 0; +} + +/** * pvr_context_lookup() - Lookup context pointer from handle and file. * @pvr_file: Pointer to pvr_file structure. * @handle: Context handle. diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index b574e23d484b..6d0dfacb677b 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/math.h> #include <linux/mutex.h> +#include <linux/spinlock_types.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/wait.h> @@ -293,6 +294,12 @@ struct pvr_device { /** @sched_wq: Workqueue for schedulers. */ struct workqueue_struct *sched_wq; + + /** + * @ctx_list_lock: Lock to be held when accessing the context list in + * struct pvr_file. + */ + spinlock_t ctx_list_lock; }; /** @@ -344,6 +351,9 @@ struct pvr_file { * This array is used to allocate handles returned to userspace. */ struct xarray vm_ctx_handles; + + /** @contexts: PVR context list. */ + struct list_head contexts; }; /** diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c index 1a0cb7aa9cea..fb17196e05f4 100644 --- a/drivers/gpu/drm/imagination/pvr_drv.c +++ b/drivers/gpu/drm/imagination/pvr_drv.c @@ -28,6 +28,7 @@ #include <linux/export.h> #include <linux/fs.h> #include <linux/kernel.h> +#include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -1326,6 +1327,8 @@ pvr_drm_driver_open(struct drm_device *drm_dev, struct drm_file *file) */ pvr_file->pvr_dev = pvr_dev; + INIT_LIST_HEAD(&pvr_file->contexts); + xa_init_flags(&pvr_file->ctx_handles, XA_FLAGS_ALLOC1); xa_init_flags(&pvr_file->free_list_handles, XA_FLAGS_ALLOC1); xa_init_flags(&pvr_file->hwrt_handles, XA_FLAGS_ALLOC1); diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 97c0f772ed65..7bd6ba4c6e8a 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -14,6 +14,7 @@ #include <drm/drm_gem.h> #include <drm/drm_gpuvm.h> +#include <linux/bug.h> #include <linux/container_of.h> #include <linux/err.h> #include <linux/errno.h> @@ -597,12 +598,26 @@ err_free: } /** - * pvr_vm_context_release() - Teardown a VM context. - * @ref_count: Pointer to reference counter of the VM context. + * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. + * @vm_ctx: Target VM context. * * This function ensures that no mappings are left dangling by unmapping them * all in order of ascending device-virtual address. */ +void +pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) +{ + WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, + vm_ctx->gpuvm_mgr.mm_range)); +} + +/** + * pvr_vm_context_release() - Teardown a VM context. + * @ref_count: Pointer to reference counter of the VM context. 
+ * + * This function also ensures that no mappings are left dangling by calling + * pvr_vm_unmap_all. + */ static void pvr_vm_context_release(struct kref *ref_count) { @@ -612,8 +627,7 @@ pvr_vm_context_release(struct kref *ref_count) if (vm_ctx->fw_mem_ctx_obj) pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj); - WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, - vm_ctx->gpuvm_mgr.mm_range)); + pvr_vm_unmap_all(vm_ctx); pvr_mmu_context_destroy(vm_ctx->mmu_ctx); drm_gem_private_object_fini(&vm_ctx->dummy_gem); diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h index f2a6463f2b05..79406243617c 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.h +++ b/drivers/gpu/drm/imagination/pvr_vm.h @@ -39,6 +39,7 @@ int pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset, u64 device_addr, u64 size); int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size); +void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx); dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx); struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx); diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c index 4082c8f2951d..6fbff516c1c1 100644 --- a/drivers/gpu/drm/panthor/panthor_device.c +++ b/drivers/gpu/drm/panthor/panthor_device.c @@ -390,11 +390,15 @@ int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct * { u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT; + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + switch (offset) { case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET: if (vma->vm_end - vma->vm_start != PAGE_SIZE || (vma->vm_flags & (VM_WRITE | VM_EXEC))) return -EINVAL; + vm_flags_clear(vma, VM_MAYWRITE); break; diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 5d5e25b1be95..7db2edb3374c 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -1580,7 +1580,9 @@ panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle) { struct panthor_vm *vm; + xa_lock(&pool->xa); vm = panthor_vm_get(xa_load(&pool->xa, handle)); + xa_unlock(&pool->xa); return vm; } diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h index 00ad34ed73a5..bd604b9f08e4 100644 --- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h @@ -517,7 +517,7 @@ * [4-6] RSVD * [7] Disabled */ -#define CCS_MODE XE_REG(0x14804) +#define CCS_MODE XE_REG(0x14804, XE_REG_OPTION_MASKED) #define CCS_MODE_CSLICE_0_3_MASK REG_GENMASK(11, 0) /* 3 bits per cslice */ #define CCS_MODE_CSLICE_MASK 0x7 /* CCS0-3 + rsvd */ #define CCS_MODE_CSLICE_WIDTH ilog2(CCS_MODE_CSLICE_MASK + 1) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 10fd4601b9f2..a1987b554a8d 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -87,10 +87,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) mutex_init(&xef->exec_queue.lock); xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1); - spin_lock(&xe->clients.lock); - xe->clients.count++; - spin_unlock(&xe->clients.lock); - file->driver_priv = xef; kref_init(&xef->refcount); @@ -107,17 +103,12 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file) static void xe_file_destroy(struct kref *ref) { struct xe_file *xef = container_of(ref, struct xe_file, refcount); - struct xe_device *xe = xef->xe; 
xa_destroy(&xef->exec_queue.xa); mutex_destroy(&xef->exec_queue.lock); xa_destroy(&xef->vm.xa); mutex_destroy(&xef->vm.lock); - spin_lock(&xe->clients.lock); - xe->clients.count--; - spin_unlock(&xe->clients.lock); - xe_drm_client_put(xef->client); kfree(xef->process_name); kfree(xef); @@ -333,7 +324,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xe->info.force_execlist = xe_modparam.force_execlist; spin_lock_init(&xe->irq.lock); - spin_lock_init(&xe->clients.lock); init_waitqueue_head(&xe->ufence_wq); diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index 894f04770454..34620ef855c0 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -178,4 +178,18 @@ void xe_device_declare_wedged(struct xe_device *xe); struct xe_file *xe_file_get(struct xe_file *xef); void xe_file_put(struct xe_file *xef); +/* + * Occasionally it is seen that the G2H worker starts running after a delay of more than + * a second even after being queued and activated by the Linux workqueue subsystem. This + * leads to G2H timeout error. The root cause of issue lies with scheduling latency of + * Lunarlake Hybrid CPU. Issue disappears if we disable Lunarlake atom cores from BIOS + * and this is beyond xe kmd. + * + * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU. + */ +#define LNL_FLUSH_WORKQUEUE(wq__) \ + flush_workqueue(wq__) +#define LNL_FLUSH_WORK(wrk__) \ + flush_work(wrk__) + #endif diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 09d731a9125c..687f3a9039bb 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -353,15 +353,6 @@ struct xe_device { struct workqueue_struct *wq; } sriov; - /** @clients: drm clients info */ - struct { - /** @clients.lock: Protects drm clients info */ - spinlock_t lock; - - /** @clients.count: number of drm clients */ - u64 count; - } clients; - /** @usm: unified memory state */ struct { /** @usm.asid: convert a ASID to VM */ diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index f23ac1e2ed88..756b492f13b0 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -132,12 +132,16 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) if (XE_IOCTL_DBG(xe, !q)) return -ENOENT; - if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) - return -EINVAL; + if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) { + err = -EINVAL; + goto err_exec_queue; + } if (XE_IOCTL_DBG(xe, args->num_batch_buffer && - q->width != args->num_batch_buffer)) - return -EINVAL; + q->width != args->num_batch_buffer)) { + err = -EINVAL; + goto err_exec_queue; + } if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) { err = -ECANCELED; @@ -220,6 +224,7 @@ retry: fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm); if (IS_ERR(fence)) { err = PTR_ERR(fence); + xe_vm_unlock(vm); goto err_unlock_list; } for (i = 0; i < num_syncs; i++) diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index d098d2dd1b2d..fd0f3b3c9101 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -260,8 +260,14 @@ void xe_exec_queue_fini(struct xe_exec_queue *q) { int i; + /* + * Before releasing our ref to lrc and xef, accumulate our run ticks + */ + xe_exec_queue_update_run_ticks(q); + for (i = 0; i < q->width; ++i) xe_lrc_put(q->lrc[i]); + __xe_exec_queue_free(q); } diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c 
b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c index d2e4dc3aaf61..ffcbd05671fc 100644 --- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c +++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c @@ -68,6 +68,12 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines) } } + /* + * Mask bits need to be set for the register. Though only Xe2+ + * platforms require setting of mask bits, it won't harm for older + * platforms as these bits are unused there. + */ + mode |= CCS_MODE_CSLICE_0_3_MASK << 16; xe_mmio_write32(gt, CCS_MODE, mode); xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n", @@ -133,9 +139,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, } /* CCS mode can only be updated when there are no drm clients */ - spin_lock(&xe->clients.lock); - if (xe->clients.count) { - spin_unlock(&xe->clients.lock); + mutex_lock(&xe->drm.filelist_mutex); + if (!list_empty(&xe->drm.filelist)) { + mutex_unlock(&xe->drm.filelist_mutex); + xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n"); return -EBUSY; } @@ -146,7 +153,7 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr, xe_gt_reset_async(gt); } - spin_unlock(&xe->clients.lock); + mutex_unlock(&xe->drm.filelist_mutex); return count; } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 8250ef71e685..afdb477ecf83 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -387,6 +387,8 @@ static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node) * the xe_ggtt_clear() called by below xe_ggtt_remove_node(). */ xe_ggtt_node_remove(node, false); + } else { + xe_ggtt_node_fini(node); } } @@ -442,7 +444,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) config->ggtt_region = node; return 0; err: - xe_ggtt_node_fini(node); + pf_release_ggtt(tile, node); return err; } diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index bbb9e411d21f..9d82ea30f4df 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -72,6 +72,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work) struct xe_device *xe = gt_to_xe(gt); struct xe_gt_tlb_invalidation_fence *fence, *next; + LNL_FLUSH_WORK(>->uc.guc.ct.g2h_worker); + spin_lock_irq(>->tlb_invalidation.pending_lock); list_for_each_entry_safe(fence, next, >->tlb_invalidation.pending_fences, link) { diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 17986bfd8818..9c505d3517cd 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -897,17 +897,8 @@ retry_same_fence: ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ); - /* - * Occasionally it is seen that the G2H worker starts running after a delay of more than - * a second even after being queued and activated by the Linux workqueue subsystem. This - * leads to G2H timeout error. The root cause of issue lies with scheduling latency of - * Lunarlake Hybrid CPU. Issue dissappears if we disable Lunarlake atom cores from BIOS - * and this is beyond xe kmd. - * - * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU. 
- */ if (!ret) { - flush_work(&ct->g2h_worker); + LNL_FLUSH_WORK(&ct->g2h_worker); if (g2h_fence.done) { xe_gt_warn(gt, "G2H fence %u, action %04x, done\n", g2h_fence.seqno, action[0]); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index f903b0772722..4f5d00aea716 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -745,8 +745,6 @@ static void guc_exec_queue_free_job(struct drm_sched_job *drm_job) { struct xe_sched_job *job = to_xe_sched_job(drm_job); - xe_exec_queue_update_run_ticks(job->q); - trace_xe_sched_job_free(job); xe_sched_job_put(job); } diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c index f5deb81eba01..5b4264ea38bd 100644 --- a/drivers/gpu/drm/xe/xe_wait_user_fence.c +++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c @@ -155,6 +155,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data, } if (!timeout) { + LNL_FLUSH_WORKQUEUE(xe->ordered_wq); + err = do_compare(addr, args->value, args->mask, + args->op); + if (err <= 0) { + drm_dbg(&xe->drm, "LNL_FLUSH_WORKQUEUE resolved ufence timeout\n"); + break; + } err = -ETIME; break; } diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index f31d352d98b5..9d88b4fa03e4 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -524,7 +524,7 @@ err_release_lock: void __i2c_dw_disable(struct dw_i2c_dev *dev) { struct i2c_timings *t = &dev->timings; - unsigned int raw_intr_stats; + unsigned int raw_intr_stats, ic_stats; unsigned int enable; int timeout = 100; bool abort_needed; @@ -532,9 +532,11 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev) int ret; regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats); + regmap_read(dev->map, DW_IC_STATUS, &ic_stats); regmap_read(dev->map, DW_IC_ENABLE, &enable); - abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD; + abort_needed = (raw_intr_stats & DW_IC_INTR_MST_ON_HOLD) || + (ic_stats & DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY); if (abort_needed) { if (!(enable & DW_IC_ENABLE_ENABLE)) { regmap_write(dev->map, DW_IC_ENABLE, DW_IC_ENABLE_ENABLE); diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 8e8854ec9882..2d32896d0673 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -116,6 +116,7 @@ #define DW_IC_STATUS_RFNE BIT(3) #define DW_IC_STATUS_MASTER_ACTIVITY BIT(5) #define DW_IC_STATUS_SLAVE_ACTIVITY BIT(6) +#define DW_IC_STATUS_MASTER_HOLD_TX_FIFO_EMPTY BIT(7) #define DW_IC_SDA_HOLD_RX_SHIFT 16 #define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, 16) diff --git a/drivers/i2c/muxes/i2c-mux-mule.c b/drivers/i2c/muxes/i2c-mux-mule.c index 8e942470b35f..284ff4afeeac 100644 --- a/drivers/i2c/muxes/i2c-mux-mule.c +++ b/drivers/i2c/muxes/i2c-mux-mule.c @@ -66,8 +66,8 @@ static int mule_i2c_mux_probe(struct platform_device *pdev) priv = i2c_mux_priv(muxc); priv->regmap = dev_get_regmap(mux_dev->parent, NULL); - if (IS_ERR(priv->regmap)) - return dev_err_probe(mux_dev, PTR_ERR(priv->regmap), + if (!priv->regmap) + return dev_err_probe(mux_dev, -ENODEV, "No parent i2c register map\n"); platform_set_drvdata(pdev, muxc); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index ce87205e3e82..8b6159f4cdaf 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -524,6 +524,13 @@ static int gic_irq_set_irqchip_state(struct irq_data *d, } 
gic_poke_irq(d, reg); + + /* + * Force read-back to guarantee that the active state has taken + * effect, and won't race with a guest-driven deactivation. + */ + if (reg == GICD_ISACTIVER) + gic_peek_irq(d, reg); return 0; } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d478aafa02c9..23e0b71b991e 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -2471,7 +2471,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign int r; unsigned int num_locks; struct dm_bufio_client *c; - char slab_name[27]; + char slab_name[64]; + static atomic_t seqno = ATOMIC_INIT(0); if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) { DMERR("%s: block size not specified or is not multiple of 512b", __func__); @@ -2522,7 +2523,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign (block_size < PAGE_SIZE || !is_power_of_2(block_size))) { unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE); - snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size); + snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u-%u", + block_size, atomic_inc_return(&seqno)); c->slab_cache = kmem_cache_create(slab_name, block_size, align, SLAB_RECLAIM_ACCOUNT, NULL); if (!c->slab_cache) { @@ -2531,9 +2533,11 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign } } if (aux_size) - snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size); + snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u-%u", + aux_size, atomic_inc_return(&seqno)); else - snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer"); + snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", + atomic_inc_return(&seqno)); c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size, 0, SLAB_RECLAIM_ACCOUNT, NULL); if (!c->slab_buffer) { diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c index 9c5308298cf1..f3051bd7d2df 100644 --- a/drivers/md/dm-cache-background-tracker.c +++ b/drivers/md/dm-cache-background-tracker.c @@ -11,12 +11,6 @@ #define DM_MSG_PREFIX "dm-background-tracker" -struct bt_work { - struct list_head list; - struct rb_node node; - struct policy_work work; -}; - struct background_tracker { unsigned int max_work; atomic_t pending_promotes; @@ -26,10 +20,10 @@ struct background_tracker { struct list_head issued; struct list_head queued; struct rb_root pending; - - struct kmem_cache *work_cache; }; +struct kmem_cache *btracker_work_cache = NULL; + struct background_tracker *btracker_create(unsigned int max_work) { struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL); @@ -48,12 +42,6 @@ struct background_tracker *btracker_create(unsigned int max_work) INIT_LIST_HEAD(&b->queued); b->pending = RB_ROOT; - b->work_cache = KMEM_CACHE(bt_work, 0); - if (!b->work_cache) { - DMERR("couldn't create mempool for background work items"); - kfree(b); - b = NULL; - } return b; } @@ -66,10 +54,9 @@ void btracker_destroy(struct background_tracker *b) BUG_ON(!list_empty(&b->issued)); list_for_each_entry_safe (w, tmp, &b->queued, list) { list_del(&w->list); - kmem_cache_free(b->work_cache, w); + kmem_cache_free(btracker_work_cache, w); } - kmem_cache_destroy(b->work_cache); kfree(b); } EXPORT_SYMBOL_GPL(btracker_destroy); @@ -180,7 +167,7 @@ static struct bt_work *alloc_work(struct background_tracker *b) if (max_work_reached(b)) return NULL; - return kmem_cache_alloc(b->work_cache, GFP_NOWAIT); + return 
kmem_cache_alloc(btracker_work_cache, GFP_NOWAIT); } int btracker_queue(struct background_tracker *b, @@ -203,7 +190,7 @@ int btracker_queue(struct background_tracker *b, * There was a race, we'll just ignore this second * bit of work for the same oblock. */ - kmem_cache_free(b->work_cache, w); + kmem_cache_free(btracker_work_cache, w); return -EINVAL; } @@ -244,7 +231,7 @@ void btracker_complete(struct background_tracker *b, update_stats(b, &w->work, -1); rb_erase(&w->node, &b->pending); list_del(&w->list); - kmem_cache_free(b->work_cache, w); + kmem_cache_free(btracker_work_cache, w); } EXPORT_SYMBOL_GPL(btracker_complete); diff --git a/drivers/md/dm-cache-background-tracker.h b/drivers/md/dm-cache-background-tracker.h index 5b8f5c667b81..09c8fc59f7bb 100644 --- a/drivers/md/dm-cache-background-tracker.h +++ b/drivers/md/dm-cache-background-tracker.h @@ -26,6 +26,14 @@ * protected with a spinlock. */ +struct bt_work { + struct list_head list; + struct rb_node node; + struct policy_work work; +}; + +extern struct kmem_cache *btracker_work_cache; + struct background_work; struct background_tracker; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 40709310e327..849eb6333e98 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -10,6 +10,7 @@ #include "dm-bio-record.h" #include "dm-cache-metadata.h" #include "dm-io-tracker.h" +#include "dm-cache-background-tracker.h" #include <linux/dm-io.h> #include <linux/dm-kcopyd.h> @@ -2263,7 +2264,7 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv, /*----------------------------------------------------------------*/ -static struct kmem_cache *migration_cache; +static struct kmem_cache *migration_cache = NULL; #define NOT_CORE_OPTION 1 @@ -3445,22 +3446,36 @@ static int __init dm_cache_init(void) int r; migration_cache = KMEM_CACHE(dm_cache_migration, 0); - if (!migration_cache) - return -ENOMEM; + if (!migration_cache) { + r = -ENOMEM; + goto err; + } + + btracker_work_cache = kmem_cache_create("dm_cache_bt_work", + sizeof(struct bt_work), __alignof__(struct bt_work), 0, NULL); + if (!btracker_work_cache) { + r = -ENOMEM; + goto err; + } r = dm_register_target(&cache_target); if (r) { - kmem_cache_destroy(migration_cache); - return r; + goto err; } return 0; + +err: + kmem_cache_destroy(migration_cache); + kmem_cache_destroy(btracker_work_cache); + return r; } static void __exit dm_cache_exit(void) { dm_unregister_target(&cache_target); kmem_cache_destroy(migration_cache); + kmem_cache_destroy(btracker_work_cache); } module_init(dm_cache_init); diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c index 8526f613a40e..cfbfc4c1b2e6 100644 --- a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c +++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c @@ -348,12 +348,12 @@ static int get_edid_tag_location(const u8 *edid, unsigned int size, /* Return if not a CTA-861 extension block */ if (size < 256 || edid[0] != 0x02 || edid[1] != 0x03) - return -1; + return -ENOENT; /* search tag */ d = edid[0x02] & 0x7f; if (d <= 4) - return -1; + return -ENOENT; i = 0x04; end = 0x00 + d; @@ -371,7 +371,7 @@ static int get_edid_tag_location(const u8 *edid, unsigned int size, return offset + i; i += len + 1; } while (i < end); - return -1; + return -ENOENT; } static void extron_edid_crc(u8 *edid) diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c 
b/drivers/media/cec/usb/pulse8/pulse8-cec.c index ba67587bd43e..171366fe3544 100644 --- a/drivers/media/cec/usb/pulse8/pulse8-cec.c +++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c @@ -685,7 +685,7 @@ static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio, err = pulse8_send_and_wait(pulse8, cmd, 1, cmd[0], 4); if (err) return err; - date = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]; + date = ((unsigned)data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]; dev_info(pulse8->dev, "Firmware build date %ptT\n", &date); dev_dbg(pulse8->dev, "Persistent config:\n"); diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index 642c48e8c1f5..ded11cd8dbf7 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -1795,6 +1795,9 @@ static void tpg_precalculate_line(struct tpg_data *tpg) unsigned p; unsigned x; + if (WARN_ON_ONCE(!tpg->src_width || !tpg->scaled_width)) + return; + switch (tpg->pattern) { case TPG_PAT_GREEN: contrast = TPG_COLOR_100_RED; diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 29a8d876e6c2..b0523fc23506 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -1482,18 +1482,23 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) } vb->planes[plane].dbuf_mapped = 1; } + } else { + for (plane = 0; plane < vb->num_planes; ++plane) + dma_buf_put(planes[plane].dbuf); + } - /* - * Now that everything is in order, copy relevant information - * provided by userspace. - */ - for (plane = 0; plane < vb->num_planes; ++plane) { - vb->planes[plane].bytesused = planes[plane].bytesused; - vb->planes[plane].length = planes[plane].length; - vb->planes[plane].m.fd = planes[plane].m.fd; - vb->planes[plane].data_offset = planes[plane].data_offset; - } + /* + * Now that everything is in order, copy relevant information + * provided by userspace. + */ + for (plane = 0; plane < vb->num_planes; ++plane) { + vb->planes[plane].bytesused = planes[plane].bytesused; + vb->planes[plane].length = planes[plane].length; + vb->planes[plane].m.fd = planes[plane].m.fd; + vb->planes[plane].data_offset = planes[plane].data_offset; + } + if (reacquired) { /* * Call driver-specific initialization on the newly acquired buffer, * if provided. 
@@ -1503,9 +1508,6 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) dprintk(q, 1, "buffer initialization failed\n"); goto err_put_vb2_buf; } - } else { - for (plane = 0; plane < vb->num_planes; ++plane) - dma_buf_put(planes[plane].dbuf); } ret = call_vb_qop(vb, buf_prepare, vb); diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 4f78f30b3646..a05aa271a1ba 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -443,8 +443,8 @@ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wra default: fepriv->auto_step++; - fepriv->auto_sub_step = -1; /* it'll be incremented to 0 in a moment */ - break; + fepriv->auto_sub_step = 0; + continue; } if (!ready) fepriv->auto_sub_step++; diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 192a8230c4aa..29edaaff7a5c 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -366,9 +366,15 @@ int dvb_vb2_querybuf(struct dvb_vb2_ctx *ctx, struct dmx_buffer *b) int dvb_vb2_expbuf(struct dvb_vb2_ctx *ctx, struct dmx_exportbuffer *exp) { struct vb2_queue *q = &ctx->vb_q; + struct vb2_buffer *vb2 = vb2_get_buffer(q, exp->index); int ret; - ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, q->bufs[exp->index], + if (!vb2) { + dprintk(1, "[%s] invalid buffer index\n", ctx->name); + return -EINVAL; + } + + ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, vb2, 0, exp->flags); if (ret) { dprintk(1, "[%s] index=%d errno=%d\n", ctx->name, diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index b43695bc51e7..9df7c213716a 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -86,10 +86,15 @@ static DECLARE_RWSEM(minor_rwsem); static int dvb_device_open(struct inode *inode, struct file *file) { struct dvb_device *dvbdev; + unsigned int minor = iminor(inode); + + if (minor >= MAX_DVB_MINORS) + return -ENODEV; mutex_lock(&dvbdev_mutex); down_read(&minor_rwsem); - dvbdev = dvb_minors[iminor(inode)]; + + dvbdev = dvb_minors[minor]; if (dvbdev && dvbdev->fops) { int err = 0; @@ -525,7 +530,10 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, for (minor = 0; minor < MAX_DVB_MINORS; minor++) if (!dvb_minors[minor]) break; - if (minor == MAX_DVB_MINORS) { +#else + minor = nums2minor(adap->num, type, id); +#endif + if (minor >= MAX_DVB_MINORS) { if (new_node) { list_del(&new_node->list_head); kfree(dvbdevfops); @@ -538,9 +546,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, mutex_unlock(&dvbdev_register_lock); return -EINVAL; } -#else - minor = nums2minor(adap->num, type, id); -#endif + dvbdev->minor = minor; dvb_minors[minor] = dvb_device_get(dvbdev); up_write(&minor_rwsem); diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c index 8b978a9f74a4..f5dd3a81725a 100644 --- a/drivers/media/dvb-frontends/cx24116.c +++ b/drivers/media/dvb-frontends/cx24116.c @@ -741,6 +741,7 @@ static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr) { struct cx24116_state *state = fe->demodulator_priv; u8 snr_reading; + int ret; static const u32 snr_tab[] = { /* 10 x Table (rounded up) */ 0x00000, 0x0199A, 0x03333, 0x04ccD, 0x06667, 0x08000, 0x0999A, 0x0b333, 0x0cccD, 0x0e667, @@ -749,7 +750,11 @@ static int cx24116_read_snr_pct(struct dvb_frontend *fe, u16 *snr) dprintk("%s()\n", __func__); - snr_reading = cx24116_readreg(state, 
CX24116_REG_QUALITY0); + ret = cx24116_readreg(state, CX24116_REG_QUALITY0); + if (ret < 0) + return ret; + + snr_reading = ret; if (snr_reading >= 0xa0 /* 100% */) *snr = 0xffff; diff --git a/drivers/media/dvb-frontends/stb0899_algo.c b/drivers/media/dvb-frontends/stb0899_algo.c index df89c33dac23..40537c4ccb0d 100644 --- a/drivers/media/dvb-frontends/stb0899_algo.c +++ b/drivers/media/dvb-frontends/stb0899_algo.c @@ -269,7 +269,7 @@ static enum stb0899_status stb0899_search_carrier(struct stb0899_state *state) short int derot_freq = 0, last_derot_freq = 0, derot_limit, next_loop = 3; int index = 0; - u8 cfr[2]; + u8 cfr[2] = {0}; u8 reg; internal->status = NOCARRIER; diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 48230d5109f0..272945a878b3 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -2519,10 +2519,10 @@ static int adv76xx_log_status(struct v4l2_subdev *sd) const struct adv76xx_chip_info *info = state->info; struct v4l2_dv_timings timings; struct stdi_readback stdi; - u8 reg_io_0x02 = io_read(sd, 0x02); + int ret; + u8 reg_io_0x02; u8 edid_enabled; u8 cable_det; - static const char * const csc_coeff_sel_rb[16] = { "bypassed", "YPbPr601 -> RGB", "reserved", "YPbPr709 -> RGB", "reserved", "RGB -> YPbPr601", "reserved", "RGB -> YPbPr709", @@ -2621,13 +2621,21 @@ static int adv76xx_log_status(struct v4l2_subdev *sd) v4l2_info(sd, "-----Color space-----\n"); v4l2_info(sd, "RGB quantization range ctrl: %s\n", rgb_quantization_range_txt[state->rgb_quantization_range]); - v4l2_info(sd, "Input color space: %s\n", - input_color_space_txt[reg_io_0x02 >> 4]); - v4l2_info(sd, "Output color space: %s %s, alt-gamma %s\n", - (reg_io_0x02 & 0x02) ? "RGB" : "YCbCr", - (((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ? - "(16-235)" : "(0-255)", - (reg_io_0x02 & 0x08) ? "enabled" : "disabled"); + + ret = io_read(sd, 0x02); + if (ret < 0) { + v4l2_info(sd, "Can't read Input/Output color space\n"); + } else { + reg_io_0x02 = ret; + + v4l2_info(sd, "Input color space: %s\n", + input_color_space_txt[reg_io_0x02 >> 4]); + v4l2_info(sd, "Output color space: %s %s, alt-gamma %s\n", + (reg_io_0x02 & 0x02) ? "RGB" : "YCbCr", + (((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ? + "(16-235)" : "(0-255)", + (reg_io_0x02 & 0x08) ? 
"enabled" : "disabled"); + } v4l2_info(sd, "Color space conversion: %s\n", csc_coeff_sel_rb[cp_read(sd, info->cp_csc) >> 4]); diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c index fc27238dd4d3..24873149096c 100644 --- a/drivers/media/i2c/ar0521.c +++ b/drivers/media/i2c/ar0521.c @@ -255,10 +255,10 @@ static u32 calc_pll(struct ar0521_dev *sensor, u32 freq, u16 *pre_ptr, u16 *mult continue; /* Minimum value */ if (new_mult > 254) break; /* Maximum, larger pre won't work either */ - if (sensor->extclk_freq * (u64)new_mult < AR0521_PLL_MIN * + if (sensor->extclk_freq * (u64)new_mult < (u64)AR0521_PLL_MIN * new_pre) continue; - if (sensor->extclk_freq * (u64)new_mult > AR0521_PLL_MAX * + if (sensor->extclk_freq * (u64)new_mult > (u64)AR0521_PLL_MAX * new_pre) break; /* Larger pre won't work either */ new_pll = div64_round_up(sensor->extclk_freq * (u64)new_mult, diff --git a/drivers/media/pci/mgb4/mgb4_cmt.c b/drivers/media/pci/mgb4/mgb4_cmt.c index 70dc78ef193c..a25b68403bc6 100644 --- a/drivers/media/pci/mgb4/mgb4_cmt.c +++ b/drivers/media/pci/mgb4/mgb4_cmt.c @@ -227,6 +227,8 @@ void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev, u32 config; size_t i; + freq_range = array_index_nospec(freq_range, ARRAY_SIZE(cmt_vals_in)); + addr = cmt_addrs_in[vindev->config->id]; reg_set = cmt_vals_in[freq_range]; diff --git a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c index d2c4a0178b3c..1db4609b3557 100644 --- a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c @@ -775,11 +775,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx) (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2; jpeg_buffer.curr = 0; - word = 0; - if (get_word_be(&jpeg_buffer, &word)) return; - jpeg_buffer.size = (long)word - 2; + + if (word < 2) + jpeg_buffer.size = 0; + else + jpeg_buffer.size = (long)word - 2; + jpeg_buffer.data += 2; jpeg_buffer.curr = 0; @@ -1058,6 +1061,7 @@ static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word) if (byte == -1) return -1; *word = (unsigned int)byte | temp; + return 0; } @@ -1145,7 +1149,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; - if (!length) + if (length <= 0) return false; sof = jpeg_buffer.curr; /* after 0xffc0 */ sof_len = length; @@ -1176,7 +1180,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; - if (!length) + if (length <= 0) return false; if (n_dqt >= S5P_JPEG_MAX_MARKER) return false; @@ -1189,7 +1193,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; - if (!length) + if (length <= 0) return false; if (n_dht >= S5P_JPEG_MAX_MARKER) return false; @@ -1214,6 +1218,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, if (get_word_be(&jpeg_buffer, &word)) break; length = (long)word - 2; + /* No need to check underflows as skip() does it */ skip(&jpeg_buffer, length); break; } diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c index 00e0d08af357..4f330f4fc6be 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.c +++ b/drivers/media/test-drivers/vivid/vivid-core.c @@ -910,7 +910,7 @@ static int vivid_create_queue(struct vivid_dev *dev, * 
videobuf2-core.c to MAX_BUFFER_INDEX. */ if (buf_type == V4L2_BUF_TYPE_VIDEO_CAPTURE) - q->max_num_buffers = 64; + q->max_num_buffers = MAX_VID_CAP_BUFFERS; if (buf_type == V4L2_BUF_TYPE_SDR_CAPTURE) q->max_num_buffers = 1024; if (buf_type == V4L2_BUF_TYPE_VBI_CAPTURE) diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h index cc18a3bc6dc0..d2d52763b119 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.h +++ b/drivers/media/test-drivers/vivid/vivid-core.h @@ -26,6 +26,8 @@ #define MAX_INPUTS 16 /* The maximum number of outputs */ #define MAX_OUTPUTS 16 +/* The maximum number of video capture buffers */ +#define MAX_VID_CAP_BUFFERS 64 /* The maximum up or down scaling factor is 4 */ #define MAX_ZOOM 4 /* The maximum image width/height are set to 4K DMT */ @@ -481,7 +483,7 @@ struct vivid_dev { /* video capture */ struct tpg_data tpg; unsigned ms_vid_cap; - bool must_blank[VIDEO_MAX_FRAME]; + bool must_blank[MAX_VID_CAP_BUFFERS]; const struct vivid_fmt *fmt_cap; struct v4l2_fract timeperframe_vid_cap; diff --git a/drivers/media/test-drivers/vivid/vivid-ctrls.c b/drivers/media/test-drivers/vivid/vivid-ctrls.c index 8bb38bc7b8cc..2b5c8fbcd0a2 100644 --- a/drivers/media/test-drivers/vivid/vivid-ctrls.c +++ b/drivers/media/test-drivers/vivid/vivid-ctrls.c @@ -553,7 +553,7 @@ static int vivid_vid_cap_s_ctrl(struct v4l2_ctrl *ctrl) break; case VIVID_CID_PERCENTAGE_FILL: tpg_s_perc_fill(&dev->tpg, ctrl->val); - for (i = 0; i < VIDEO_MAX_FRAME; i++) + for (i = 0; i < MAX_VID_CAP_BUFFERS; i++) dev->must_blank[i] = ctrl->val < 100; break; case VIVID_CID_INSERT_SAV: diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index 69620e0a35a0..6a790ac8cbe6 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -213,7 +213,7 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count) dev->vid_cap_seq_count = 0; dprintk(dev, 1, "%s\n", __func__); - for (i = 0; i < VIDEO_MAX_FRAME; i++) + for (i = 0; i < MAX_VID_CAP_BUFFERS; i++) dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100; if (dev->start_streaming_error) { dev->start_streaming_error = false; diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c index e5a364efd5e6..95a2202879d8 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-api.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c @@ -753,9 +753,10 @@ static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) for (i = 0; i < master->ncontrols; i++) cur_to_new(master->cluster[i]); ret = call_op(master, g_volatile_ctrl); - new_to_user(c, ctrl); + if (!ret) + ret = new_to_user(c, ctrl); } else { - cur_to_user(c, ctrl); + ret = cur_to_user(c, ctrl); } v4l2_ctrl_unlock(master); return ret; @@ -770,7 +771,10 @@ int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) if (!ctrl || !ctrl->is_int) return -EINVAL; ret = get_ctrl(ctrl, &c); - control->value = c.value; + + if (!ret) + control->value = c.value; + return ret; } EXPORT_SYMBOL(v4l2_g_ctrl); @@ -811,10 +815,11 @@ static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, int ret; v4l2_ctrl_lock(ctrl); - user_to_new(c, ctrl); - ret = set_ctrl(fh, ctrl, 0); + ret = user_to_new(c, ctrl); + if (!ret) + ret = set_ctrl(fh, ctrl, 0); if (!ret) - cur_to_user(c, ctrl); + ret = cur_to_user(c, ctrl); v4l2_ctrl_unlock(ctrl); return ret; } diff --git a/drivers/net/bonding/bond_main.c 
b/drivers/net/bonding/bond_main.c index 9e8bdd0d0922..49dd4fe195e5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1008,6 +1008,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_UP) bond_hw_addr_flush(bond->dev, old_active->dev); + + bond_slave_ns_maddrs_add(bond, old_active); } if (new_active) { @@ -1024,6 +1026,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, dev_mc_sync(new_active->dev, bond->dev); netif_addr_unlock_bh(bond->dev); } + + bond_slave_ns_maddrs_del(bond, new_active); } } @@ -2350,6 +2354,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bond_compute_features(bond); bond_set_carrier(bond); + /* Needs to be called before bond_select_active_slave(), which will + * remove the maddrs if the slave is selected as active slave. + */ + bond_slave_ns_maddrs_add(bond, new_slave); + if (bond_uses_primary(bond)) { block_netpoll_tx(); bond_select_active_slave(bond); @@ -2359,7 +2368,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); - if (!slave_dev->netdev_ops->ndo_bpf || !slave_dev->netdev_ops->ndo_xdp_xmit) { if (bond->xdp_prog) { @@ -2557,6 +2565,12 @@ static int __bond_release_one(struct net_device *bond_dev, if (oldcurrent == slave) bond_change_active_slave(bond, NULL); + /* Must be called after bond_change_active_slave () as the slave + * might change from an active slave to a backup slave. Then it is + * necessary to clear the maddrs on the backup slave. + */ + bond_slave_ns_maddrs_del(bond, slave); + if (bond_is_lb(bond)) { /* Must be called only after the slave has been * detached from the list and the curr_active_slave diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 95d59a18c022..327b6ecdc77e 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -15,6 +15,7 @@ #include <linux/sched/signal.h> #include <net/bonding.h> +#include <net/ndisc.h> static int bond_option_active_slave_set(struct bonding *bond, const struct bond_opt_value *newval); @@ -1234,6 +1235,68 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond, } #if IS_ENABLED(CONFIG_IPV6) +static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *slave) +{ + return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && + !bond_is_active_slave(slave) && + slave->dev->flags & IFF_MULTICAST; +} + +static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) +{ + struct in6_addr *targets = bond->params.ns_targets; + char slot_maddr[MAX_ADDR_LEN]; + int i; + + if (!slave_can_set_ns_maddr(bond, slave)) + return; + + for (i = 0; i < BOND_MAX_NS_TARGETS; i++) { + if (ipv6_addr_any(&targets[i])) + break; + + if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) { + if (add) + dev_mc_add(slave->dev, slot_maddr); + else + dev_mc_del(slave->dev, slot_maddr); + } + } +} + +void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave) +{ + if (!bond->params.arp_validate) + return; + slave_set_ns_maddrs(bond, slave, true); +} + +void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) +{ + if (!bond->params.arp_validate) + return; + slave_set_ns_maddrs(bond, slave, false); +} + +static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave, + struct in6_addr *target, struct in6_addr *slot) +{ + char 
target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN]; + + if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave)) + return; + + /* remove the previous maddr from slave */ + if (!ipv6_addr_any(slot) && + !ndisc_mc_map(slot, slot_maddr, slave->dev, 0)) + dev_mc_del(slave->dev, slot_maddr); + + /* add new maddr on slave if target is set */ + if (!ipv6_addr_any(target) && + !ndisc_mc_map(target, target_maddr, slave->dev, 0)) + dev_mc_add(slave->dev, target_maddr); +} + static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, struct in6_addr *target, unsigned long last_rx) @@ -1243,8 +1306,10 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, struct slave *slave; if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) { - bond_for_each_slave(bond, slave, iter) + bond_for_each_slave(bond, slave, iter) { slave->target_last_arp_rx[slot] = last_rx; + slave_set_ns_maddr(bond, slave, target, &targets[slot]); + } targets[slot] = *target; } } @@ -1296,15 +1361,30 @@ static int bond_option_ns_ip6_targets_set(struct bonding *bond, { return -EPERM; } + +static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) {} + +void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave) {} + +void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) {} #endif static int bond_option_arp_validate_set(struct bonding *bond, const struct bond_opt_value *newval) { + bool changed = !!bond->params.arp_validate != !!newval->value; + struct list_head *iter; + struct slave *slave; + netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n", newval->string, newval->value); bond->params.arp_validate = newval->value; + if (changed) { + bond_for_each_slave(bond, slave, iter) + slave_set_ns_maddrs(bond, slave, !!bond->params.arp_validate); + } + return 0; } diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 0ba658a72d8f..d16817e0476f 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1131,6 +1131,10 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds) if (i == dev->cpu_port) continue; ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED); + + /* Power down the internal PHY if port is unused. */ + if (dsa_is_unused_port(ds, i) && dev->info->internal_phy[i]) + ksz_pwrite16(dev, i, 0x100, BMCR_PDOWN); } } diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index 7d7560f23a73..1c6d7fc16772 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -2,7 +2,7 @@ /* * Microchip KSZ9477 series register access through I2C * - * Copyright (C) 2018-2019 Microchip Technology Inc. + * Copyright (C) 2018-2024 Microchip Technology Inc. */ #include <linux/i2c.h> @@ -16,6 +16,8 @@ KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0); static int ksz9477_i2c_probe(struct i2c_client *i2c) { + const struct ksz_chip_data *chip; + struct device *ddev = &i2c->dev; struct regmap_config rc; struct ksz_device *dev; int i, ret; @@ -24,6 +26,12 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c) if (!dev) return -ENOMEM; + chip = device_get_match_data(ddev); + if (!chip) + return -EINVAL; + + /* Save chip id to do special initialization when probing. 
*/ + dev->chip_id = chip->chip_id; for (i = 0; i < __KSZ_NUM_REGMAPS; i++) { rc = ksz9477_regmap_config[i]; rc.lock_arg = &dev->regmap_mutex; @@ -111,6 +119,10 @@ static const struct of_device_id ksz9477_dt_ids[] = { .compatible = "microchip,ksz9567", .data = &ksz_switch_chips[KSZ9567] }, + { + .compatible = "microchip,lan9646", + .data = &ksz_switch_chips[LAN9646] + }, {}, }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index d57782cdda59..920443ee8ffd 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1908,6 +1908,41 @@ const struct ksz_chip_data ksz_switch_chips[] = { .internal_phy = {true, true, true, true, false, false, true, true}, }, + + [LAN9646] = { + .chip_id = LAN9646_CHIP_ID, + .dev_name = "LAN9646", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x7F, /* can be configured as cpu port */ + .port_cnt = 7, /* total physical port count */ + .port_nirqs = 4, + .num_tx_queues = 4, + .num_ipms = 8, + .ops = &ksz9477_dev_ops, + .phylink_mac_ops = &ksz9477_phylink_mac_ops, + .phy_errata_9477 = true, + .mib_names = ksz9477_mib_names, + .mib_cnt = ARRAY_SIZE(ksz9477_mib_names), + .reg_mib_cnt = MIB_COUNTER_NUM, + .regs = ksz9477_regs, + .masks = ksz9477_masks, + .shifts = ksz9477_shifts, + .xmii_ctrl0 = ksz9477_xmii_ctrl0, + .xmii_ctrl1 = ksz9477_xmii_ctrl1, + .supports_mii = {false, false, false, false, + false, true, true}, + .supports_rmii = {false, false, false, false, + false, true, true}, + .supports_rgmii = {false, false, false, false, + false, true, true}, + .internal_phy = {true, true, true, true, + true, false, false}, + .gbit_capable = {true, true, true, true, true, true, true}, + .wr_table = &ksz9477_register_set, + .rd_table = &ksz9477_register_set, + }, }; EXPORT_SYMBOL_GPL(ksz_switch_chips); @@ -2970,6 +3005,7 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port) case KSZ9896_CHIP_ID: /* KSZ9896C Errata DS80000757A Module 3 */ case KSZ9897_CHIP_ID: + case LAN9646_CHIP_ID: /* KSZ9897R Errata DS80000758C Module 4 */ /* Energy Efficient Ethernet (EEE) feature select must be manually disabled * The EEE feature is enabled by default, but it is not fully @@ -3230,6 +3266,7 @@ static void ksz_port_teardown(struct dsa_switch *ds, int port) case KSZ9893_CHIP_ID: case KSZ9896_CHIP_ID: case KSZ9897_CHIP_ID: + case LAN9646_CHIP_ID: if (dsa_is_user_port(ds, port)) ksz9477_port_acl_free(dev, port); } @@ -3286,7 +3323,8 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds, dev->chip_id == KSZ9477_CHIP_ID || dev->chip_id == KSZ9896_CHIP_ID || dev->chip_id == KSZ9897_CHIP_ID || - dev->chip_id == KSZ9567_CHIP_ID) + dev->chip_id == KSZ9567_CHIP_ID || + dev->chip_id == LAN9646_CHIP_ID) proto = DSA_TAG_PROTO_KSZ9477; if (is_lan937x(dev)) @@ -3405,6 +3443,7 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port) case LAN9372_CHIP_ID: case LAN9373_CHIP_ID: case LAN9374_CHIP_ID: + case LAN9646_CHIP_ID: return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN; } @@ -3427,6 +3466,7 @@ static int ksz_validate_eee(struct dsa_switch *ds, int port) case KSZ9893_CHIP_ID: case KSZ9896_CHIP_ID: case KSZ9897_CHIP_ID: + case LAN9646_CHIP_ID: return 0; } @@ -3779,7 +3819,10 @@ static int ksz_switch_detect(struct ksz_device *dev) case LAN9372_CHIP_ID: case LAN9373_CHIP_ID: case LAN9374_CHIP_ID: - dev->chip_id = id32; + + /* LAN9646 does not have its own chip id. 
*/ + if (dev->chip_id != LAN9646_CHIP_ID) + dev->chip_id = id32; break; case KSZ9893_CHIP_ID: ret = ksz_read8(dev, REG_CHIP_ID4, @@ -3818,6 +3861,7 @@ static int ksz_cls_flower_add(struct dsa_switch *ds, int port, case KSZ9893_CHIP_ID: case KSZ9896_CHIP_ID: case KSZ9897_CHIP_ID: + case LAN9646_CHIP_ID: return ksz9477_cls_flower_add(ds, port, cls, ingress); } @@ -3838,6 +3882,7 @@ static int ksz_cls_flower_del(struct dsa_switch *ds, int port, case KSZ9893_CHIP_ID: case KSZ9896_CHIP_ID: case KSZ9897_CHIP_ID: + case LAN9646_CHIP_ID: return ksz9477_cls_flower_del(ds, port, cls, ingress); } @@ -4925,6 +4970,7 @@ static int ksz_parse_drive_strength(struct ksz_device *dev) case KSZ9893_CHIP_ID: case KSZ9896_CHIP_ID: case KSZ9897_CHIP_ID: + case LAN9646_CHIP_ID: return ksz9477_drive_strength_write(dev, of_props, ARRAY_SIZE(of_props)); default: diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index bbb548af201e..b3bb75ca0796 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -236,6 +236,7 @@ enum ksz_model { LAN9372, LAN9373, LAN9374, + LAN9646, }; enum ksz_regs { diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c index 1c6652f2b9fe..108a958dc356 100644 --- a/drivers/net/dsa/microchip/ksz_spi.c +++ b/drivers/net/dsa/microchip/ksz_spi.c @@ -54,6 +54,8 @@ static int ksz_spi_probe(struct spi_device *spi) if (!chip) return -EINVAL; + /* Save chip id to do special initialization when probing. */ + dev->chip_id = chip->chip_id; if (chip->chip_id == KSZ88X3_CHIP_ID) regmap_config = ksz8863_regmap_config; else if (chip->chip_id == KSZ8795_CHIP_ID || @@ -203,6 +205,10 @@ static const struct of_device_id ksz_dt_ids[] = { .compatible = "microchip,lan9374", .data = &ksz_switch_chips[LAN9374] }, + { + .compatible = "microchip,lan9646", + .data = &ksz_switch_chips[LAN9646] + }, {}, }; MODULE_DEVICE_TABLE(of, ksz_dt_ids); @@ -228,6 +234,7 @@ static const struct spi_device_id ksz_spi_ids[] = { { "lan9372" }, { "lan9373" }, { "lan9374" }, + { "lan9646" }, { }, }; MODULE_DEVICE_TABLE(spi, ksz_spi_ids); diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index f8d8c70642c4..59b4a7240b58 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -673,7 +673,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, * We therefore need to lock the MDIO bus onto which the switch is * connected. */ - mutex_lock(&priv->bus->mdio_lock); + mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); /* Actually start the request: * 1. 
Send mdio master packet diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e9aab2e2840e..4c1302a8f72d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -864,6 +864,11 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) bnapi->events &= ~BNXT_TX_CMP_EVENT; } +static bool bnxt_separate_head_pool(void) +{ + return PAGE_SIZE > BNXT_RX_PAGE_SIZE; +} + static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, struct bnxt_rx_ring_info *rxr, unsigned int *offset, @@ -886,27 +891,19 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, } static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, + struct bnxt_rx_ring_info *rxr, gfp_t gfp) { - u8 *data; - struct pci_dev *pdev = bp->pdev; + unsigned int offset; + struct page *page; - if (gfp == GFP_ATOMIC) - data = napi_alloc_frag(bp->rx_buf_size); - else - data = netdev_alloc_frag(bp->rx_buf_size); - if (!data) + page = page_pool_alloc_frag(rxr->head_pool, &offset, + bp->rx_buf_size, gfp); + if (!page) return NULL; - *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - - if (dma_mapping_error(&pdev->dev, *mapping)) { - skb_free_frag(data); - data = NULL; - } - return data; + *mapping = page_pool_get_dma_addr(page) + bp->rx_dma_offset + offset; + return page_address(page) + offset; } int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, @@ -928,7 +925,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, rx_buf->data = page; rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; } else { - u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); + u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, gfp); if (!data) return -ENOMEM; @@ -1179,13 +1176,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, } skb = napi_build_skb(data, bp->rx_buf_size); - dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + bp->rx_dir); if (!skb) { - skb_free_frag(data); + page_pool_free_va(rxr->head_pool, data, true); return NULL; } + skb_mark_for_recycle(skb); skb_reserve(skb, bp->rx_offset); skb_put(skb, offset_and_len & 0xffff); return skb; @@ -1840,7 +1838,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, u8 *new_data; dma_addr_t new_mapping; - new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); + new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, rxr, + GFP_ATOMIC); if (!new_data) { bnxt_abort_tpa(cpr, idx, agg_bufs); cpr->sw_stats->rx.rx_oom_discards += 1; @@ -1852,16 +1851,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, tpa_info->mapping = new_mapping; skb = napi_build_skb(data, bp->rx_buf_size); - dma_unmap_single_attrs(&bp->pdev->dev, mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&bp->pdev->dev, mapping, + bp->rx_buf_use_size, bp->rx_dir); if (!skb) { - skb_free_frag(data); + page_pool_free_va(rxr->head_pool, data, true); bnxt_abort_tpa(cpr, idx, agg_bufs); cpr->sw_stats->rx.rx_oom_discards += 1; return NULL; } + skb_mark_for_recycle(skb); skb_reserve(skb, bp->rx_offset); skb_put(skb, len); } @@ -3308,28 +3307,22 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) static void bnxt_free_one_rx_ring(struct bnxt *bp, 
struct bnxt_rx_ring_info *rxr) { - struct pci_dev *pdev = bp->pdev; int i, max_idx; max_idx = bp->rx_nr_pages * RX_DESC_CNT; for (i = 0; i < max_idx; i++) { struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; - dma_addr_t mapping = rx_buf->mapping; void *data = rx_buf->data; if (!data) continue; rx_buf->data = NULL; - if (BNXT_RX_PAGE_MODE(bp)) { + if (BNXT_RX_PAGE_MODE(bp)) page_pool_recycle_direct(rxr->page_pool, data); - } else { - dma_unmap_single_attrs(&pdev->dev, mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - skb_free_frag(data); - } + else + page_pool_free_va(rxr->head_pool, data, true); } } @@ -3356,7 +3349,6 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; - struct pci_dev *pdev = bp->pdev; struct bnxt_tpa_idx_map *map; int i; @@ -3370,13 +3362,8 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) if (!data) continue; - dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - tpa_info->data = NULL; - - skb_free_frag(data); + page_pool_free_va(rxr->head_pool, data, false); } skip_rx_tpa_free: @@ -3592,7 +3579,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp) xdp_rxq_info_unreg(&rxr->xdp_rxq); page_pool_destroy(rxr->page_pool); - rxr->page_pool = NULL; + if (rxr->page_pool != rxr->head_pool) + page_pool_destroy(rxr->head_pool); + rxr->page_pool = rxr->head_pool = NULL; kfree(rxr->rx_agg_bmap); rxr->rx_agg_bmap = NULL; @@ -3610,6 +3599,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, int numa_node) { struct page_pool_params pp = { 0 }; + struct page_pool *pool; pp.pool_size = bp->rx_agg_ring_size; if (BNXT_RX_PAGE_MODE(bp)) @@ -3622,14 +3612,25 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, pp.max_len = PAGE_SIZE; pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; - rxr->page_pool = page_pool_create(&pp); - if (IS_ERR(rxr->page_pool)) { - int err = PTR_ERR(rxr->page_pool); + pool = page_pool_create(&pp); + if (IS_ERR(pool)) + return PTR_ERR(pool); + rxr->page_pool = pool; - rxr->page_pool = NULL; - return err; + if (bnxt_separate_head_pool()) { + pp.pool_size = max(bp->rx_ring_size, 1024); + pool = page_pool_create(&pp); + if (IS_ERR(pool)) + goto err_destroy_pp; } + rxr->head_pool = pool; + return 0; + +err_destroy_pp: + page_pool_destroy(rxr->page_pool); + rxr->page_pool = NULL; + return PTR_ERR(pool); } static int bnxt_alloc_rx_rings(struct bnxt *bp) @@ -4180,7 +4181,8 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) u8 *data; for (i = 0; i < bp->max_tpa; i++) { - data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL); + data = __bnxt_alloc_rx_frag(bp, &mapping, rxr, + GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 69231e85140b..649955fa3e37 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1105,6 +1105,7 @@ struct bnxt_rx_ring_info { struct bnxt_ring_struct rx_agg_ring_struct; struct xdp_rxq_info xdp_rxq; struct page_pool *page_pool; + struct page_pool *head_pool; }; struct bnxt_rx_sw_stats { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index d18c11e406fc..a5f8ce576b6e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ 
b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -174,9 +174,11 @@ static int enetc_vf_probe(struct pci_dev *pdev, si = pci_get_drvdata(pdev); si->revision = ENETC_REV_1_0; err = enetc_get_driver_data(si); - if (err) - return dev_err_probe(&pdev->dev, err, - "Could not get VF driver data\n"); + if (err) { + dev_err_probe(&pdev->dev, err, + "Could not get VF driver data\n"); + goto err_alloc_netdev; + } enetc_get_si_caps(si); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b83df5f94b1f..f1d088168723 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -907,7 +907,7 @@ static int igb_request_msix(struct igb_adapter *adapter) int i, err = 0, vector = 0, free_vector = 0; err = request_irq(adapter->msix_entries[vector].vector, - igb_msix_other, IRQF_NO_THREAD, netdev->name, adapter); + igb_msix_other, 0, netdev->name, adapter); if (err) goto err_out; diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index a32d85d6f599..35c4f5f64f58 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -46,3 +46,11 @@ config OCTEONTX2_VF depends on OCTEONTX2_PF help This driver supports Marvell's OcteonTX2 NIC virtual function. + +config RVU_ESWITCH + tristate "Marvell RVU E-Switch support" + depends on OCTEONTX2_PF + default m + help + This driver supports Marvell's RVU E-Switch that + provides internal SRIOV packet steering and switching. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 3cf4c8285c90..ccea37847df8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -11,4 +11,5 @@ rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ - rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o + rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o \ + rvu_rep.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 2436c1ff9ba4..5d84386ed22d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -156,6 +156,7 @@ enum nix_scheduler { #define NIC_HW_MIN_FRS 40 #define NIC_HW_MAX_FRS 9212 #define SDP_HW_MAX_FRS 65535 +#define SDP_HW_MIN_FRS 16 #define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */ #define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 6ea2f3071fe8..62c07407eb94 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -144,6 +144,9 @@ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ M(PTP_GET_CAP, 0x00c, ptp_get_cap, msg_req, ptp_get_cap_rsp) \ +M(GET_REP_CNT, 0x00d, get_rep_cnt, msg_req, get_rep_cnt_rsp) \ +M(ESW_CFG, 0x00e, esw_cfg, esw_cfg_req, msg_rsp) \ +M(REP_EVENT_NOTIFY, 0x00f, rep_event_notify, rep_event, msg_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \ @@ 
-319,6 +322,7 @@ M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_re M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \ nix_mcast_grp_update_req, \ nix_mcast_grp_update_rsp) \ +M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \ /* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ mcs_alloc_rsrc_rsp) \ @@ -380,12 +384,16 @@ M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp) #define MBOX_UP_MCS_MESSAGES \ M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp) +#define MBOX_UP_REP_MESSAGES \ +M(REP_EVENT_UP_NOTIFY, 0xEF0, rep_event_up_notify, rep_event, msg_rsp) \ + enum { #define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id, MBOX_MESSAGES MBOX_UP_CGX_MESSAGES MBOX_UP_CPT_MESSAGES MBOX_UP_MCS_MESSAGES +MBOX_UP_REP_MESSAGES #undef M }; @@ -1364,6 +1372,37 @@ struct nix_bandprof_get_hwinfo_rsp { u32 policer_timeunit; }; +struct nix_stats_req { + struct mbox_msghdr hdr; + u8 reset; + u16 pcifunc; + u64 rsvd; +}; + +struct nix_stats_rsp { + struct mbox_msghdr hdr; + u16 pcifunc; + struct { + u64 octs; + u64 ucast; + u64 bcast; + u64 mcast; + u64 drop; + u64 drop_octs; + u64 drop_mcast; + u64 drop_bcast; + u64 err; + u64 rsvd[5]; + } rx; + struct { + u64 ucast; + u64 bcast; + u64 mcast; + u64 drop; + u64 octs; + } tx; +}; + /* NPC mbox message structs */ #define NPC_MCAM_ENTRY_INVALID 0xFFFF @@ -1525,6 +1564,41 @@ struct ptp_get_cap_rsp { u64 cap; }; +struct get_rep_cnt_rsp { + struct mbox_msghdr hdr; + u16 rep_cnt; + u16 rep_pf_map[64]; + u64 rsvd; +}; + +struct esw_cfg_req { + struct mbox_msghdr hdr; + u8 ena; + u64 rsvd; +}; + +struct rep_evt_data { + u8 port_state; + u8 vf_state; + u16 rx_mode; + u16 rx_flags; + u16 mtu; + u8 mac[ETH_ALEN]; + u64 rsvd[5]; +}; + +struct rep_event { + struct mbox_msghdr hdr; + u16 pcifunc; +#define RVU_EVENT_PORT_STATE BIT_ULL(0) +#define RVU_EVENT_PFVF_STATE BIT_ULL(1) +#define RVU_EVENT_MTU_CHANGE BIT_ULL(2) +#define RVU_EVENT_RX_MODE_CHANGE BIT_ULL(3) +#define RVU_EVENT_MAC_ADDR_CHANGE BIT_ULL(4) + u16 event; + struct rep_evt_data evt_data; +}; + struct flow_msg { unsigned char dmac[6]; unsigned char smac[6]; @@ -1563,6 +1637,7 @@ struct flow_msg { u8 icmp_type; u8 icmp_code; __be16 tcp_flags; + u16 sq_id; }; struct npc_install_flow_req { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index e8c6a6fe9bd5..b897845e25fd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -513,6 +513,11 @@ struct rvu_switch { u16 start_entry; }; +struct rep_evtq_ent { + struct list_head node; + struct rep_event event; +}; + struct rvu { void __iomem *afreg_base; void __iomem *pfreg_base; @@ -595,6 +600,15 @@ struct rvu { spinlock_t cpt_intr_lock; struct mutex mbox_lock; /* Serialize mbox up and down msgs */ + u16 rep_pcifunc; + int rep_cnt; + u16 *rep2pfvf_map; + u8 rep_mode; + struct work_struct rep_evt_work; + struct workqueue_struct *rep_evt_wq; + struct list_head rep_evtq_head; + /* Representor event lock */ + spinlock_t rep_evtq_lock; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -853,6 +867,14 @@ bool is_sdp_pfvf(u16 pcifunc); bool is_sdp_pf(u16 pcifunc); bool is_sdp_vf(struct rvu *rvu, u16 pcifunc); +static inline bool is_rep_dev(struct rvu *rvu, u16 pcifunc) +{ + if (rvu->rep_pcifunc && rvu->rep_pcifunc == pcifunc) + return true; + + return false; +} + 
/* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) { @@ -1051,7 +1073,8 @@ int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr); /* RVU Switch */ void rvu_switch_enable(struct rvu *rvu); void rvu_switch_disable(struct rvu *rvu); -void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena); +void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool ena); int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, u64 pkind, u8 var_len_off, u8 var_len_off_mask, @@ -1064,4 +1087,9 @@ int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc); void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena); void rvu_mcs_exit(struct rvu *rvu); +/* Representor APIs */ +int rvu_rep_pf_init(struct rvu *rvu); +int rvu_rep_install_mcam_rules(struct rvu *rvu); +void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena); +int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable); #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 8c700ee4a82b..148144f5b61d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -45,33 +45,6 @@ enum { CGX_STAT18, }; -/* NIX TX stats */ -enum nix_stat_lf_tx { - TX_UCAST = 0x0, - TX_BCAST = 0x1, - TX_MCAST = 0x2, - TX_DROP = 0x3, - TX_OCTS = 0x4, - TX_STATS_ENUM_LAST, -}; - -/* NIX RX stats */ -enum nix_stat_lf_rx { - RX_OCTS = 0x0, - RX_UCAST = 0x1, - RX_BCAST = 0x2, - RX_MCAST = 0x3, - RX_DROP = 0x4, - RX_DROP_OCTS = 0x5, - RX_FCS = 0x6, - RX_ERR = 0x7, - RX_DRP_BCAST = 0x8, - RX_DRP_MCAST = 0x9, - RX_DRP_L3BCAST = 0xa, - RX_DRP_L3MCAST = 0xb, - RX_STATS_ENUM_LAST, -}; - static char *cgx_rx_stats_fields[] = { [CGX_STAT0] = "Received packets", [CGX_STAT1] = "Octets of received packets", diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 9c26e19a860b..dab4deca893f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1500,6 +1500,9 @@ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) struct rvu *rvu = rvu_dl->rvu; struct rvu_switch *rswitch; + if (rvu->rep_mode) + return -EOPNOTSUPP; + rswitch = &rvu->rswitch; *mode = rswitch->mode; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index da69350c6f76..5d5a01dbbca1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -31,6 +31,7 @@ static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, u32 leaf_prof); static const char *nix_get_ctx_name(int ctype); +static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc); enum mc_tbl_sz { MC_TBL_SZ_256, @@ -312,7 +313,9 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, /* TLs aggegating traffic are shared across PF and VFs */ if (lvl >= hw->cap.nix_tx_aggr_lvl) { - if (rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) + if ((nix_get_tx_link(rvu, map_func) != + nix_get_tx_link(rvu, pcifunc)) && + (rvu_get_pf(map_func) != rvu_get_pf(pcifunc))) return false; else return true; @@ -360,7 +363,6 @@ static int nix_interface_init(struct rvu *rvu, u16 
pcifunc, int type, int nixlf, cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); rvu_npc_set_pkind(rvu, pkind, pfvf); - break; case NIX_INTF_TYPE_LBK: vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; @@ -584,6 +586,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) return 0; + if (is_sdp_pfvf(pcifunc)) + type = NIX_INTF_TYPE_SDP; + pfvf = rvu_get_pfvf(rvu, pcifunc); err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); if (err) @@ -1614,6 +1619,12 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, cfg = NPC_TX_DEF_PKIND; rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); + if (is_rep_dev(rvu, pcifunc)) { + pfvf->tx_chan_base = RVU_SWITCH_LBK_CHAN; + pfvf->tx_chan_cnt = 1; + goto exit; + } + intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (is_sdp_pfvf(pcifunc)) intf = NIX_INTF_TYPE_SDP; @@ -1684,6 +1695,9 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; + if (is_rep_dev(rvu, pcifunc)) + goto free_lf; + if (req->flags & NIX_LF_DISABLE_FLOWS) rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); else @@ -1695,6 +1709,7 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req, nix_interface_deinit(rvu, pcifunc, nixlf); +free_lf: /* Reset this NIX LF */ err = rvu_lf_reset(rvu, block, nixlf); if (err) { @@ -2007,7 +2022,8 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, struct rvu_hwinfo *hw = rvu->hw; int pf = rvu_get_pf(pcifunc); - if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */ + /* LBK links */ + if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) { *start = hw->cap.nix_txsch_per_cgx_lmac * link; *end = *start + hw->cap.nix_txsch_per_lbk_lmac; } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ @@ -2760,7 +2776,7 @@ void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, int schq; u64 cfg; - if (!is_pf_cgxmapped(rvu, pf)) + if (!is_pf_cgxmapped(rvu, pf) && !is_rep_dev(rvu, pcifunc)) return; cfg = enable ? 
(BIT_ULL(12) | RVU_SWITCH_LBK_CHAN) : 0; @@ -4393,8 +4409,6 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) ether_addr_copy(pfvf->default_mac, req->mac_addr); - rvu_switch_update_rules(rvu, pcifunc); - return 0; } @@ -4555,7 +4569,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; - if (is_lbk_vf(rvu, pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_rep_dev(rvu, pcifunc)) rvu_get_lbk_link_max_frs(rvu, &max_mtu); else rvu_get_lmac_link_max_frs(rvu, &max_mtu); @@ -4583,6 +4597,8 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, /* For VFs of PF0 ingress is LBK port, so config LBK link */ pfvf = rvu_get_pfvf(rvu, pcifunc); link = hw->cgx_links + pfvf->lbkid; + } else if (is_rep_dev(rvu, pcifunc)) { + link = hw->cgx_links + 0; } if (link < 0) @@ -4674,7 +4690,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, if (hw->sdp_links) { link = hw->cgx_links + hw->lbk_links; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), - SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + SDP_HW_MAX_FRS << 16 | SDP_HW_MIN_FRS); } /* Get MCS external bypass status for CN10K-B */ @@ -5166,7 +5182,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; - int nixlf, err; + int nixlf, err, pf; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) @@ -5182,7 +5198,11 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); set_bit(NIXLF_INITIALIZED, &pfvf->flags); - rvu_switch_update_rules(rvu, pcifunc); + rvu_switch_update_rules(rvu, pcifunc, true); + + pf = rvu_get_pf(pcifunc); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, true); return rvu_cgx_start_stop_io(rvu, pcifunc, true); } @@ -5192,7 +5212,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, { u16 pcifunc = req->hdr.pcifunc; struct rvu_pfvf *pfvf; - int nixlf, err; + int nixlf, err, pf; err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); if (err) @@ -5210,8 +5230,12 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, if (err) return err; + rvu_switch_update_rules(rvu, pcifunc, false); rvu_cgx_tx_enable(rvu, pcifunc, true); + pf = rvu_get_pf(pcifunc); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, false); return 0; } @@ -5239,6 +5263,9 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) clear_bit(NIXLF_INITIALIZED, &pfvf->flags); + if (is_pf_cgxmapped(rvu, pf) && rvu->rep_mode) + rvu_rep_notify_pfvf_state(rvu, pcifunc, false); + rvu_cgx_start_stop_io(rvu, pcifunc, false); if (pfvf->sq_ctx) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 7a1c18b1486d..da69e454662a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1398,6 +1398,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, struct npc_install_flow_rsp *rsp) { bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK); + bool from_rep_dev = !!is_rep_dev(rvu, req->hdr.pcifunc); struct rvu_switch *rswitch = &rvu->rswitch; int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; @@ -1454,14 +1455,19 @@ process_flow: /* AF installing for a PF/VF */ if (!req->hdr.pcifunc) 
target = req->vf; + /* PF installing for its VF */ - else if (!from_vf && req->vf) { + if (!from_vf && req->vf && !from_rep_dev) { target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf; pf_set_vfs_mac = req->default_rule && (req->features & BIT_ULL(NPC_DMAC)); } - /* msg received from PF/VF */ + + /* Representor device installing for a representee */ + if (from_rep_dev && req->vf) + target = req->vf; else + /* msg received from PF/VF */ target = req->hdr.pcifunc; /* ignore chan_mask in case pf func is not AF, revisit later */ @@ -1474,8 +1480,10 @@ process_flow: pfvf = rvu_get_pfvf(rvu, target); + if (from_rep_dev) + req->channel = pfvf->rx_chan_base; /* PF installing for its VF */ - if (req->hdr.pcifunc && !from_vf && req->vf) + if (req->hdr.pcifunc && !from_vf && req->vf && !from_rep_dev) set_bit(PF_SET_VF_CFG, &pfvf->flags); /* update req destination mac addr */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 2b299fa85159..62cdc714ba57 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -445,6 +445,7 @@ #define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12) #define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0) +#define NIX_VLAN_ETYPE_MASK GENMASK_ULL(63, 48) #define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16) #define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c new file mode 100644 index 000000000000..052ae5923e3a --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#include <linux/bitfield.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "rvu.h" +#include "rvu_reg.h" + +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ +static struct _req_type __maybe_unused \ +*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \ +{ \ + struct _req_type *req; \ + \ + req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ + &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \ + sizeof(struct _rsp_type)); \ + if (!req) \ + return NULL; \ + req->hdr.sig = OTX2_MBOX_REQ_SIG; \ + req->hdr.id = _id; \ + return req; \ +} + +MBOX_UP_REP_MESSAGES +#undef M + +static int rvu_rep_up_notify(struct rvu *rvu, struct rep_event *event) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, event->pcifunc); + struct rep_event *msg; + int pf; + + pf = rvu_get_pf(event->pcifunc); + + if (event->event & RVU_EVENT_MAC_ADDR_CHANGE) + ether_addr_copy(pfvf->mac_addr, event->evt_data.mac); + + mutex_lock(&rvu->mbox_lock); + msg = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); + if (!msg) { + mutex_unlock(&rvu->mbox_lock); + return -ENOMEM; + } + + msg->hdr.pcifunc = event->pcifunc; + msg->event = event->event; + + memcpy(&msg->evt_data, &event->evt_data, sizeof(struct rep_evt_data)); + + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf); + + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + + mutex_unlock(&rvu->mbox_lock); + return 0; +} + +static void rvu_rep_wq_handler(struct work_struct *work) +{ + struct rvu *rvu = container_of(work, struct rvu, rep_evt_work); + struct rep_evtq_ent *qentry; + struct rep_event *event; + unsigned long flags; + + do { + spin_lock_irqsave(&rvu->rep_evtq_lock, flags); + qentry = list_first_entry_or_null(&rvu->rep_evtq_head, + struct rep_evtq_ent, + node); + if (qentry) + list_del(&qentry->node); + + spin_unlock_irqrestore(&rvu->rep_evtq_lock, flags); + if (!qentry) + break; /* nothing more to process */ + + event = &qentry->event; + + rvu_rep_up_notify(rvu, event); + kfree(qentry); + } while (1); +} + +int rvu_mbox_handler_rep_event_notify(struct rvu *rvu, struct rep_event *req, + struct msg_rsp *rsp) +{ + struct rep_evtq_ent *qentry; + + qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC); + if (!qentry) + return -ENOMEM; + + qentry->event = *req; + spin_lock(&rvu->rep_evtq_lock); + list_add_tail(&qentry->node, &rvu->rep_evtq_head); + spin_unlock(&rvu->rep_evtq_lock); + queue_work(rvu->rep_evt_wq, &rvu->rep_evt_work); + return 0; +} + +int rvu_rep_notify_pfvf_state(struct rvu *rvu, u16 pcifunc, bool enable) +{ + struct rep_event *req; + int pf; + + if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) + return 0; + + pf = rvu_get_pf(rvu->rep_pcifunc); + + mutex_lock(&rvu->mbox_lock); + req = otx2_mbox_alloc_msg_rep_event_up_notify(rvu, pf); + if (!req) { + mutex_unlock(&rvu->mbox_lock); + return -ENOMEM; + } + + req->hdr.pcifunc = rvu->rep_pcifunc; + req->event |= RVU_EVENT_PFVF_STATE; + req->pcifunc = pcifunc; + req->evt_data.vf_state = enable; + + otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf); + otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); + + mutex_unlock(&rvu->mbox_lock); + return 0; +} + +#define RVU_LF_RX_STATS(reg) \ + rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_STATX(nixlf, reg)) + +#define RVU_LF_TX_STATS(reg) \ + rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_STATX(nixlf, reg)) + +int rvu_mbox_handler_nix_lf_stats(struct rvu *rvu, + struct nix_stats_req *req, + struct nix_stats_rsp *rsp) +{ + u16 pcifunc = req->pcifunc; + int nixlf, 
blkaddr, err; + struct msg_req rst_req; + struct msg_rsp rst_rsp; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (err) + return 0; + + if (req->reset) { + rst_req.hdr.pcifunc = pcifunc; + return rvu_mbox_handler_nix_stats_rst(rvu, &rst_req, &rst_rsp); + } + rsp->rx.octs = RVU_LF_RX_STATS(RX_OCTS); + rsp->rx.ucast = RVU_LF_RX_STATS(RX_UCAST); + rsp->rx.bcast = RVU_LF_RX_STATS(RX_BCAST); + rsp->rx.mcast = RVU_LF_RX_STATS(RX_MCAST); + rsp->rx.drop = RVU_LF_RX_STATS(RX_DROP); + rsp->rx.err = RVU_LF_RX_STATS(RX_ERR); + rsp->rx.drop_octs = RVU_LF_RX_STATS(RX_DROP_OCTS); + rsp->rx.drop_mcast = RVU_LF_RX_STATS(RX_DRP_MCAST); + rsp->rx.drop_bcast = RVU_LF_RX_STATS(RX_DRP_BCAST); + + rsp->tx.octs = RVU_LF_TX_STATS(TX_OCTS); + rsp->tx.ucast = RVU_LF_TX_STATS(TX_UCAST); + rsp->tx.bcast = RVU_LF_TX_STATS(TX_BCAST); + rsp->tx.mcast = RVU_LF_TX_STATS(TX_MCAST); + rsp->tx.drop = RVU_LF_TX_STATS(TX_DROP); + + rsp->pcifunc = req->pcifunc; + return 0; +} + +static u16 rvu_rep_get_vlan_id(struct rvu *rvu, u16 pcifunc) +{ + int id; + + for (id = 0; id < rvu->rep_cnt; id++) + if (rvu->rep2pfvf_map[id] == pcifunc) + return id; + return 0; +} + +static int rvu_rep_tx_vlan_cfg(struct rvu *rvu, u16 pcifunc, + u16 vlan_tci, int *vidx) +{ + struct nix_vtag_config_rsp rsp = {}; + struct nix_vtag_config req = {}; + u64 etype = ETH_P_8021Q; + int err; + + /* Insert vlan tag */ + req.hdr.pcifunc = pcifunc; + req.vtag_size = VTAGSIZE_T4; + req.cfg_type = 0; /* tx vlan cfg */ + req.tx.cfg_vtag0 = true; + req.tx.vtag0 = FIELD_PREP(NIX_VLAN_ETYPE_MASK, etype) | vlan_tci; + + err = rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp); + if (err) { + dev_err(rvu->dev, "Tx vlan config failed\n"); + return err; + } + *vidx = rsp.vtag0_idx; + return 0; +} + +static int rvu_rep_rx_vlan_cfg(struct rvu *rvu, u16 pcifunc) +{ + struct nix_vtag_config req = {}; + struct nix_vtag_config_rsp rsp; + + /* config strip, capture and size */ + req.hdr.pcifunc = pcifunc; + req.vtag_size = VTAGSIZE_T4; + req.cfg_type = 1; /* rx vlan cfg */ + req.rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0; + req.rx.strip_vtag = true; + req.rx.capture_vtag = false; + + return rvu_mbox_handler_nix_vtag_cfg(rvu, &req, &rsp); +} + +static int rvu_rep_install_rx_rule(struct rvu *rvu, u16 pcifunc, + u16 entry, bool rte) +{ + struct npc_install_flow_req req = {}; + struct npc_install_flow_rsp rsp = {}; + struct rvu_pfvf *pfvf; + u16 vlan_tci, rep_id; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + /* To steer the traffic from Representee to Representor */ + rep_id = rvu_rep_get_vlan_id(rvu, pcifunc); + if (rte) { + vlan_tci = rep_id | BIT_ULL(8); + req.vf = rvu->rep_pcifunc; + req.op = NIX_RX_ACTIONOP_UCAST; + req.index = rep_id; + } else { + vlan_tci = rep_id; + req.vf = pcifunc; + req.op = NIX_RX_ACTION_DEFAULT; + } + + rvu_rep_rx_vlan_cfg(rvu, req.vf); + req.entry = entry; + req.hdr.pcifunc = 0; /* AF is requester */ + req.features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_VLAN_ETYPE_CTAG); + req.vtag0_valid = true; + req.vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0; + req.packet.vlan_etype = cpu_to_be16(ETH_P_8021Q); + req.mask.vlan_etype = cpu_to_be16(ETH_P_8021Q); + req.packet.vlan_tci = cpu_to_be16(vlan_tci); + req.mask.vlan_tci = cpu_to_be16(0xffff); + + req.channel = RVU_SWITCH_LBK_CHAN; + req.chan_mask = 0xffff; + req.intf = pfvf->nix_rx_intf; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_rep_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry, + bool rte) +{ + struct npc_install_flow_req req = {}; + struct 
npc_install_flow_rsp rsp = {}; + struct rvu_pfvf *pfvf; + int vidx, err; + u16 vlan_tci; + u8 lbkid; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + vlan_tci = rvu_rep_get_vlan_id(rvu, pcifunc); + if (rte) + vlan_tci |= BIT_ULL(8); + + err = rvu_rep_tx_vlan_cfg(rvu, pcifunc, vlan_tci, &vidx); + if (err) + return err; + + lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1; + req.hdr.pcifunc = 0; /* AF is requester */ + if (rte) { + req.vf = pcifunc; + } else { + req.vf = rvu->rep_pcifunc; + req.packet.sq_id = vlan_tci; + req.mask.sq_id = 0xffff; + } + + req.entry = entry; + req.intf = pfvf->nix_tx_intf; + req.op = NIX_TX_ACTIONOP_UCAST_CHAN; + req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN; + req.set_cntr = 1; + req.vtag0_def = vidx; + req.vtag0_op = 1; + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +int rvu_rep_install_mcam_rules(struct rvu *rvu) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u16 start = rswitch->start_entry; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc, entry = 0; + int pf, vf, numvfs; + int err, nixlf, i; + u8 rep; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << RVU_PFVF_PF_SHIFT; + rvu_get_nix_blkaddr(rvu, pcifunc); + rep = true; + for (i = 0; i < 2; i++) { + err = rvu_rep_install_rx_rule(rvu, pcifunc, + start + entry, rep); + if (err) + return err; + rswitch->entry2pcifunc[entry++] = pcifunc; + + err = rvu_rep_install_tx_rule(rvu, pcifunc, + start + entry, rep); + if (err) + return err; + rswitch->entry2pcifunc[entry++] = pcifunc; + rep = false; + } + + rvu_get_pf_numvfs(rvu, pf, &numvfs, NULL); + for (vf = 0; vf < numvfs; vf++) { + pcifunc = pf << RVU_PFVF_PF_SHIFT | + ((vf + 1) & RVU_PFVF_FUNC_MASK); + rvu_get_nix_blkaddr(rvu, pcifunc); + + /* Skip installimg rules if nixlf is not attached */ + err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL); + if (err) + continue; + rep = true; + for (i = 0; i < 2; i++) { + err = rvu_rep_install_rx_rule(rvu, pcifunc, + start + entry, + rep); + if (err) + return err; + rswitch->entry2pcifunc[entry++] = pcifunc; + + err = rvu_rep_install_tx_rule(rvu, pcifunc, + start + entry, + rep); + if (err) + return err; + rswitch->entry2pcifunc[entry++] = pcifunc; + rep = false; + } + } + } + + /* Initialize the wq for handling REP events */ + spin_lock_init(&rvu->rep_evtq_lock); + INIT_LIST_HEAD(&rvu->rep_evtq_head); + INIT_WORK(&rvu->rep_evt_work, rvu_rep_wq_handler); + rvu->rep_evt_wq = alloc_workqueue("rep_evt_wq", 0, 0); + if (!rvu->rep_evt_wq) { + dev_err(rvu->dev, "REP workqueue allocation failed\n"); + return -ENOMEM; + } + return 0; +} + +void rvu_rep_update_rules(struct rvu *rvu, u16 pcifunc, bool ena) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + struct npc_mcam *mcam = &rvu->hw->mcam; + u32 max = rswitch->used_entries; + int blkaddr; + u16 entry; + + if (!rswitch->used_entries) + return; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + + if (blkaddr < 0) + return; + + rvu_switch_enable_lbk_link(rvu, pcifunc, ena); + mutex_lock(&mcam->lock); + for (entry = 0; entry < max; entry++) { + if (rswitch->entry2pcifunc[entry] == pcifunc) + npc_enable_mcam_entry(rvu, mcam, blkaddr, entry, ena); + } + mutex_unlock(&mcam->lock); +} + +int rvu_rep_pf_init(struct rvu *rvu) +{ + u16 pcifunc = rvu->rep_pcifunc; + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + set_bit(NIXLF_INITIALIZED, &pfvf->flags); + rvu_switch_enable_lbk_link(rvu, pcifunc, true); + rvu_rep_rx_vlan_cfg(rvu, pcifunc); + return 0; +} + +int rvu_mbox_handler_esw_cfg(struct rvu 
*rvu, struct esw_cfg_req *req, + struct msg_rsp *rsp) +{ + if (req->hdr.pcifunc != rvu->rep_pcifunc) + return 0; + + rvu->rep_mode = req->ena; + + if (!rvu->rep_mode) + rvu_npc_free_mcam_entries(rvu, req->hdr.pcifunc, -1); + + return 0; +} + +int rvu_mbox_handler_get_rep_cnt(struct rvu *rvu, struct msg_req *req, + struct get_rep_cnt_rsp *rsp) +{ + int pf, vf, numvfs, hwvf, rep = 0; + u16 pcifunc; + + rvu->rep_pcifunc = req->hdr.pcifunc; + rsp->rep_cnt = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + rvu->rep_cnt = rsp->rep_cnt; + + rvu->rep2pfvf_map = devm_kzalloc(rvu->dev, rvu->rep_cnt * + sizeof(u16), GFP_KERNEL); + if (!rvu->rep2pfvf_map) + return -ENOMEM; + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + pcifunc = pf << RVU_PFVF_PF_SHIFT; + rvu->rep2pfvf_map[rep] = pcifunc; + rsp->rep_pf_map[rep] = pcifunc; + rep++; + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + for (vf = 0; vf < numvfs; vf++) { + rvu->rep2pfvf_map[rep] = pcifunc | + ((vf + 1) & RVU_PFVF_FUNC_MASK); + rsp->rep_pf_map[rep] = rvu->rep2pfvf_map[rep]; + rep++; + } + } + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index fc8da2090657..77ac94cb2ec4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -823,4 +823,30 @@ enum nix_tx_vtag_op { #define VTAG_STRIP BIT_ULL(4) #define VTAG_CAPTURE BIT_ULL(5) +/* NIX TX stats */ +enum nix_stat_lf_tx { + TX_UCAST = 0x0, + TX_BCAST = 0x1, + TX_MCAST = 0x2, + TX_DROP = 0x3, + TX_OCTS = 0x4, + TX_STATS_ENUM_LAST, +}; + +/* NIX RX stats */ +enum nix_stat_lf_rx { + RX_OCTS = 0x0, + RX_UCAST = 0x1, + RX_BCAST = 0x2, + RX_MCAST = 0x3, + RX_DROP = 0x4, + RX_DROP_OCTS = 0x5, + RX_FCS = 0x6, + RX_ERR = 0x7, + RX_DRP_BCAST = 0x8, + RX_DRP_MCAST = 0x9, + RX_DRP_L3BCAST = 0xa, + RX_DRP_L3MCAST = 0xb, + RX_STATS_ENUM_LAST, +}; #endif /* RVU_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 854045ed3b06..268efb7c1c15 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -8,7 +8,7 @@ #include <linux/bitfield.h> #include "rvu.h" -static void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable) +void rvu_switch_enable_lbk_link(struct rvu *rvu, u16 pcifunc, bool enable) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct nix_hw *nix_hw; @@ -166,6 +166,8 @@ void rvu_switch_enable(struct rvu *rvu) alloc_req.contig = true; alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + if (rvu->rep_mode) + alloc_req.count = alloc_req.count * 4; ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, &alloc_rsp); if (ret) { @@ -189,7 +191,12 @@ void rvu_switch_enable(struct rvu *rvu) rswitch->used_entries = alloc_rsp.count; rswitch->start_entry = alloc_rsp.entry; - ret = rvu_switch_install_rules(rvu); + if (rvu->rep_mode) { + rvu_rep_pf_init(rvu); + ret = rvu_rep_install_mcam_rules(rvu); + } else { + ret = rvu_switch_install_rules(rvu); + } if (ret) goto uninstall_rules; @@ -222,6 +229,9 @@ void rvu_switch_disable(struct rvu *rvu) if (!rswitch->used_entries) return; + if (rvu->rep_mode) + goto free_ents; + for (pf = 1; pf < hw->total_pfs; pf++) { if (!is_pf_cgxmapped(rvu, pf)) continue; @@ -249,6 +259,7 @@ void rvu_switch_disable(struct rvu *rvu) } } +free_ents: uninstall_req.start = 
rswitch->start_entry; uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; free_req.all = 1; @@ -258,12 +269,15 @@ void rvu_switch_disable(struct rvu *rvu) kfree(rswitch->entry2pcifunc); } -void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc) +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc, bool ena) { struct rvu_switch *rswitch = &rvu->rswitch; u32 max = rswitch->used_entries; u16 entry; + if (rvu->rep_mode) + return rvu_rep_update_rules(rvu, pcifunc, ena); + if (!rswitch->used_entries) return; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 64a97a0a10ed..dbc971266865 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -5,11 +5,13 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o +obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ otx2_devlink.o qos_sq.o qos.o rvu_nicvf-y := otx2_vf.o +rvu_rep-y := rep.o rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 7417087b6db5..a15cc86635d6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -72,7 +72,7 @@ int cn10k_lmtst_init(struct otx2_nic *pfvf) } EXPORT_SYMBOL(cn10k_lmtst_init); -int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) +int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) { struct nix_cn10k_aq_enq_req *aq; struct otx2_nic *pfvf = dev; @@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); - aq->sq.default_chan = pfvf->hw.tx_chan_base; + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; aq->sq.sq_int_ena = NIX_SQINT_BITS; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index c1861f7de254..e3f0bce9908f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -26,7 +26,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu) int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); -int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); +int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura); int cn10k_lmtst_init(struct otx2_nic *pfvf); int cn10k_free_all_ipolicers(struct otx2_nic *pfvf); int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 6e0183f0d5a1..523ecb798a7a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -83,6 +83,7 @@ int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); return 1; } +EXPORT_SYMBOL(otx2_update_rq_stats); int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) 
{ @@ -99,6 +100,7 @@ int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); return 1; } +EXPORT_SYMBOL(otx2_update_sq_stats); void otx2_get_dev_stats(struct otx2_nic *pfvf) { @@ -246,13 +248,14 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) mutex_unlock(&pfvf->mbox.lock); return err; } +EXPORT_SYMBOL(otx2_hw_set_mtu); int otx2_config_pause_frm(struct otx2_nic *pfvf) { struct cgx_pause_frm_cfg *req; int err; - if (is_otx2_lbkvf(pfvf->pdev)) + if (is_otx2_lbkvf(pfvf->pdev) || is_otx2_sdp_rep(pfvf->pdev)) return 0; mutex_lock(&pfvf->mbox.lock); @@ -646,12 +649,22 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); req->regval[2] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL4) { + int sdp_chan = hw->tx_chan_base + prio; + + if (is_otx2_sdp_rep(pfvf->pdev)) + prio = 0; parent = schq_list[NIX_TXSCH_LVL_TL3][prio]; req->reg[0] = NIX_AF_TL4X_PARENT(schq); req->regval[0] = (u64)parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); req->regval[1] = dwrr_val; + if (is_otx2_sdp_rep(pfvf->pdev)) { + req->num_regs++; + req->reg[2] = NIX_AF_TL4X_SDP_LINK_CFG(schq); + req->regval[2] = BIT_ULL(12) | BIT_ULL(13) | + (sdp_chan & 0xff); + } } else if (lvl == NIX_TXSCH_LVL_TL3) { parent = schq_list[NIX_TXSCH_LVL_TL2][prio]; req->reg[0] = NIX_AF_TL3X_PARENT(schq); @@ -659,7 +672,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for req->num_regs++; req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); req->regval[1] = dwrr_val; - if (lvl == hw->txschq_link_cfg_lvl) { + if (lvl == hw->txschq_link_cfg_lvl && + !is_otx2_sdp_rep(pfvf->pdev)) { req->num_regs++; req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure @@ -676,7 +690,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val; - if (lvl == hw->txschq_link_cfg_lvl) { + if (lvl == hw->txschq_link_cfg_lvl && + !is_otx2_sdp_rep(pfvf->pdev)) { req->num_regs++; req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure @@ -735,6 +750,7 @@ EXPORT_SYMBOL(otx2_smq_flush); int otx2_txsch_alloc(struct otx2_nic *pfvf) { + int chan_cnt = pfvf->hw.tx_chan_cnt; struct nix_txsch_alloc_req *req; struct nix_txsch_alloc_rsp *rsp; int lvl, schq, rc; @@ -747,6 +763,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf) /* Request one schq per level */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) req->schq[lvl] = 1; + + if (is_otx2_sdp_rep(pfvf->pdev) && chan_cnt > 1) { + req->schq[NIX_TXSCH_LVL_SMQ] = chan_cnt; + req->schq[NIX_TXSCH_LVL_TL4] = chan_cnt; + } + rc = otx2_sync_mbox_msg(&pfvf->mbox); if (rc) return rc; @@ -757,10 +779,12 @@ int otx2_txsch_alloc(struct otx2_nic *pfvf) return PTR_ERR(rsp); /* Setup transmit scheduler list */ - for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + pfvf->hw.txschq_cnt[lvl] = rsp->schq[lvl]; for (schq = 0; schq < rsp->schq[lvl]; schq++) pfvf->hw.txschq_list[lvl][schq] = rsp->schq_list[lvl][schq]; + } pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl; pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio; @@ -798,12 +822,15 @@ EXPORT_SYMBOL(otx2_txschq_free_one); void otx2_txschq_stop(struct otx2_nic *pfvf) { - int lvl, schq; + int lvl, schq, idx; /* free non QOS TLx nodes */ - for (lvl = 0; lvl < 
NIX_TXSCH_LVL_CNT; lvl++) - otx2_txschq_free_one(pfvf, lvl, - pfvf->hw.txschq_list[lvl][0]); + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + for (idx = 0; idx < pfvf->hw.txschq_cnt[lvl]; idx++) { + otx2_txschq_free_one(pfvf, lvl, + pfvf->hw.txschq_list[lvl][idx]); + } + } /* Clear the txschq list */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { @@ -883,7 +910,7 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) return otx2_sync_mbox_msg(&pfvf->mbox); } -int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) +int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura) { struct otx2_nic *pfvf = dev; struct otx2_snd_queue *sq; @@ -902,7 +929,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; aq->sq.smq = otx2_get_smq_idx(pfvf, qidx); aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen); - aq->sq.default_chan = pfvf->hw.tx_chan_base; + aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; aq->sq.sq_int_ena = NIX_SQINT_BITS; @@ -925,6 +952,7 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) struct otx2_qset *qset = &pfvf->qset; struct otx2_snd_queue *sq; struct otx2_pool *pool; + u8 chan_offset; int err; pool = &pfvf->qset.pool[sqb_aura]; @@ -971,7 +999,8 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) sq->stats.bytes = 0; sq->stats.pkts = 0; - err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + chan_offset = qidx % pfvf->hw.tx_chan_cnt; + err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura); if (err) { kfree(sq->sg); sq->sg = NULL; @@ -1738,6 +1767,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, pfvf->hw.sqb_size = rsp->sqb_size; pfvf->hw.rx_chan_base = rsp->rx_chan_base; pfvf->hw.tx_chan_base = rsp->tx_chan_base; + pfvf->hw.rx_chan_cnt = rsp->rx_chan_cnt; + pfvf->hw.tx_chan_cnt = rsp->tx_chan_cnt; pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; pfvf->hw.cgx_links = rsp->cgx_links; @@ -1782,6 +1813,7 @@ void otx2_free_cints(struct otx2_nic *pfvf, int n) free_irq(vector, &qset->napi[qidx]); } } +EXPORT_SYMBOL(otx2_free_cints); void otx2_set_cints_affinity(struct otx2_nic *pfvf) { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 327254e578d5..566848663fea 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -29,6 +29,7 @@ #include "otx2_devlink.h" #include <rvu_trace.h> #include "qos.h" +#include "rep.h" /* IPv4 flag more fragment bit */ #define IPV4_FLAG_MORE 0x20 @@ -41,6 +42,8 @@ #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200 #define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00 +#define PCI_DEVID_OCTEONTX2_SDP_REP 0xA0F7 + /* PCI BAR nos */ #define PCI_CFG_REG_BAR_NUM 2 #define PCI_MBOX_BAR_NUM 4 @@ -120,33 +123,6 @@ enum otx2_errcodes_re { ERRCODE_IL4_CSUM = 0x22, }; -/* NIX TX stats */ -enum nix_stat_lf_tx { - TX_UCAST = 0x0, - TX_BCAST = 0x1, - TX_MCAST = 0x2, - TX_DROP = 0x3, - TX_OCTS = 0x4, - TX_STATS_ENUM_LAST, -}; - -/* NIX RX stats */ -enum nix_stat_lf_rx { - RX_OCTS = 0x0, - RX_UCAST = 0x1, - RX_BCAST = 0x2, - RX_MCAST = 0x3, - RX_DROP = 0x4, - RX_DROP_OCTS = 0x5, - RX_FCS = 0x6, - RX_ERR = 0x7, - RX_DRP_BCAST = 0x8, - RX_DRP_MCAST = 0x9, - RX_DRP_L3BCAST = 0xa, - RX_DRP_L3MCAST = 0xb, - RX_STATS_ENUM_LAST, -}; - struct otx2_dev_stats { u64 rx_bytes; u64 
rx_frames; @@ -224,6 +200,7 @@ struct otx2_hw { /* NIX */ u8 txschq_link_cfg_lvl; + u8 txschq_cnt[NIX_TXSCH_LVL_CNT]; u8 txschq_aggr_lvl_rr_prio; u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 matchall_ipolicer; @@ -234,6 +211,8 @@ struct otx2_hw { /* HW settings, coalescing etc */ u16 rx_chan_base; u16 tx_chan_base; + u8 rx_chan_cnt; + u8 tx_chan_cnt; u16 cq_qcount_wait; u16 cq_ecount_wait; u16 rq_skid; @@ -368,7 +347,8 @@ struct otx2_flow_config { }; struct dev_hw_ops { - int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); + int (*sq_aq_init)(void *dev, u16 qidx, u8 chan_offset, + u16 sqb_aura); void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); @@ -466,6 +446,8 @@ struct otx2_nic { #define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15) #define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16) #define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17) +#define OTX2_FLAG_REP_MODE_ENABLED BIT_ULL(18) +#define OTX2_FLAG_PORT_UP BIT_ULL(19) u64 flags; u64 *cq_op_addr; @@ -533,11 +515,19 @@ struct otx2_nic { #if IS_ENABLED(CONFIG_MACSEC) struct cn10k_mcs_cfg *macsec_cfg; #endif + +#if IS_ENABLED(CONFIG_RVU_ESWITCH) + struct rep_dev **reps; + int rep_cnt; + u16 rep_pf_map[RVU_MAX_REP]; + u16 esw_mode; +#endif }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) { - return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF; + return (pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF) || + (pdev->device == PCI_DEVID_RVU_REP); } static inline bool is_96xx_A0(struct pci_dev *pdev) @@ -552,6 +542,11 @@ static inline bool is_96xx_B0(struct pci_dev *pdev) (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF); } +static inline bool is_otx2_sdp_rep(struct pci_dev *pdev) +{ + return pdev->device == PCI_DEVID_OCTEONTX2_SDP_REP; +} + /* REVID for PCIe devices. 
* Bits 0..1: minor pass, bit 3..2: major pass * bits 7..4: midr id @@ -914,15 +909,19 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf, static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx) { u16 smq; + int idx; + #ifdef CONFIG_DCB if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx]) return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx]; #endif /* check if qidx falls under QOS queues */ - if (qidx >= pfvf->hw.non_qos_queues) + if (qidx >= pfvf->hw.non_qos_queues) { smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues]; - else - smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; + } else { + idx = qidx % pfvf->hw.txschq_cnt[NIX_TXSCH_LVL_SMQ]; + smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][idx]; + } return smq; } @@ -989,8 +988,8 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx); void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura); -int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); -int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); +int otx2_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura); +int cn10k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura); int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma); int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, @@ -1142,4 +1141,12 @@ u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb, int otx2_get_txq_by_classid(struct otx2_nic *pfvf, u16 classid); void otx2_qos_config_txschq(struct otx2_nic *pfvf); void otx2_clean_qos_queues(struct otx2_nic *pfvf); +int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info); +int otx2_setup_tc_cls_flower(struct otx2_nic *nic, + struct flow_cls_offload *cls_flower); + +static inline int mcam_entry_cmp(const void *a, const void *b) +{ + return *(u16 *)a - *(u16 *)b; +} #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 53f14aa944bd..33ec9a7f7c03 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -141,7 +141,56 @@ static const struct devlink_param otx2_dl_params[] = { otx2_dl_ucast_flt_cnt_validate), }; +#ifdef CONFIG_RVU_ESWITCH +static int otx2_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + if (!otx2_rep_dev(pfvf->pdev)) + return -EOPNOTSUPP; + + *mode = pfvf->esw_mode; + + return 0; +} + +static int otx2_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + int ret = 0; + + if (!otx2_rep_dev(pfvf->pdev)) + return -EOPNOTSUPP; + + if (pfvf->esw_mode == mode) + return 0; + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + rvu_rep_destroy(pfvf); + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + ret = rvu_rep_create(pfvf, extack); + break; + default: + return -EINVAL; + } + + if (!ret) + pfvf->esw_mode = mode; + + return ret; +} +#endif + static const struct devlink_ops otx2_devlink_ops = { +#ifdef CONFIG_RVU_ESWITCH + .eswitch_mode_get = otx2_devlink_eswitch_mode_get, + .eswitch_mode_set = otx2_devlink_eswitch_mode_set, +#endif 
}; int otx2_register_dl(struct otx2_nic *pfvf) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 58720a161ee2..47bfd1fb37d4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -64,11 +64,6 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) return 0; } -static int mcam_entry_cmp(const void *a, const void *b) -{ - return *(u16 *)a - *(u16 *)b; -} - int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e6b03bad2dba..e310f99b1736 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -519,6 +519,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work) switch (msg->id) { case MBOX_MSG_CGX_LINK_EVENT: + case MBOX_MSG_REP_EVENT_UP_NOTIFY: break; default: if (msg->rc) @@ -832,6 +833,9 @@ static void otx2_handle_link_event(struct otx2_nic *pf) struct cgx_link_user_info *linfo = &pf->linfo; struct net_device *netdev = pf->netdev; + if (pf->flags & OTX2_FLAG_PORT_UP) + return; + pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name, linfo->link_up ? "UP" : "DOWN", linfo->speed, linfo->full_duplex ? "Full" : "Half"); @@ -844,6 +848,35 @@ static void otx2_handle_link_event(struct otx2_nic *pf) } } +static int otx2_mbox_up_handler_rep_event_up_notify(struct otx2_nic *pf, + struct rep_event *info, + struct msg_rsp *rsp) +{ + struct net_device *netdev = pf->netdev; + + if (info->event == RVU_EVENT_MTU_CHANGE) { + netdev->mtu = info->evt_data.mtu; + return 0; + } + + if (info->event == RVU_EVENT_PORT_STATE) { + if (info->evt_data.port_state) { + pf->flags |= OTX2_FLAG_PORT_UP; + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + } else { + pf->flags &= ~OTX2_FLAG_PORT_UP; + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + return 0; + } +#ifdef CONFIG_RVU_ESWITCH + rvu_event_up_notify(pf, info); +#endif + return 0; +} + int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf, struct mcs_intr_info *event, struct msg_rsp *rsp) @@ -913,6 +946,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf, } MBOX_UP_CGX_MESSAGES MBOX_UP_MCS_MESSAGES +MBOX_UP_REP_MESSAGES #undef M break; default: @@ -1016,6 +1050,7 @@ void otx2_disable_mbox_intr(struct otx2_nic *pf) otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0)); free_irq(vector, pf); } +EXPORT_SYMBOL(otx2_disable_mbox_intr); int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af) { @@ -1076,6 +1111,7 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf) otx2_mbox_destroy(&mbox->mbox); otx2_mbox_destroy(&mbox->mbox_up); } +EXPORT_SYMBOL(otx2_pfaf_mbox_destroy); int otx2_pfaf_mbox_init(struct otx2_nic *pf) { @@ -1398,6 +1434,7 @@ irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq) return IRQ_HANDLED; } +EXPORT_SYMBOL(otx2_cq_intr_handler); void otx2_disable_napi(struct otx2_nic *pf) { @@ -1415,6 +1452,7 @@ void otx2_disable_napi(struct otx2_nic *pf) netif_napi_del(&cq_poll->napi); } } +EXPORT_SYMBOL(otx2_disable_napi); static void otx2_free_cq_res(struct otx2_nic *pf) { @@ -1496,10 +1534,11 @@ int otx2_init_hw_resources(struct otx2_nic *pf) hw->sqpool_cnt = otx2_get_total_tx_queues(pf); hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; - /* Maximum hardware 
supported transmit length */ - pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; - - pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); + if (!otx2_rep_dev(pf->pdev)) { + /* Maximum hardware supported transmit length */ + pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN; + pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu); + } mutex_lock(&mbox->lock); /* NPA init */ @@ -1552,10 +1591,15 @@ int otx2_init_hw_resources(struct otx2_nic *pf) } for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - err = otx2_txschq_config(pf, lvl, 0, false); - if (err) { - mutex_unlock(&mbox->lock); - goto err_free_nix_queues; + int idx; + + for (idx = 0; idx < pf->hw.txschq_cnt[lvl]; idx++) { + err = otx2_txschq_config(pf, lvl, idx, false); + if (err) { + dev_err(pf->dev, "Failed to config TXSCH\n"); + mutex_unlock(&mbox->lock); + goto err_free_nix_queues; + } } } @@ -1604,6 +1648,7 @@ exit: mutex_unlock(&mbox->lock); return err; } +EXPORT_SYMBOL(otx2_init_hw_resources); void otx2_free_hw_resources(struct otx2_nic *pf) { @@ -1627,11 +1672,12 @@ void otx2_free_hw_resources(struct otx2_nic *pf) otx2_pfc_txschq_stop(pf); #endif - otx2_clean_qos_queues(pf); + if (!otx2_rep_dev(pf->pdev)) + otx2_clean_qos_queues(pf); mutex_lock(&mbox->lock); /* Disable backpressure */ - if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK)) + if (!is_otx2_lbkvf(pf->pdev)) otx2_nix_config_bp(pf, false); mutex_unlock(&mbox->lock); @@ -1663,7 +1709,8 @@ void otx2_free_hw_resources(struct otx2_nic *pf) otx2_free_cq_res(pf); /* Free all ingress bandwidth profiles allocated */ - cn10k_free_all_ipolicers(pf); + if (!otx2_rep_dev(pf->pdev)) + cn10k_free_all_ipolicers(pf); mutex_lock(&mbox->lock); /* Reset NIX LF */ @@ -1691,6 +1738,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf) } mutex_unlock(&mbox->lock); } +EXPORT_SYMBOL(otx2_free_hw_resources); static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf) { @@ -1784,6 +1832,7 @@ void otx2_free_queue_mem(struct otx2_qset *qset) kfree(qset->napi); qset->napi = NULL; } +EXPORT_SYMBOL(otx2_free_queue_mem); int otx2_alloc_queue_mem(struct otx2_nic *pf) { @@ -1830,6 +1879,7 @@ err_free_mem: otx2_free_queue_mem(qset); return -ENOMEM; } +EXPORT_SYMBOL(otx2_alloc_queue_mem); int otx2_open(struct net_device *netdev) { @@ -1963,6 +2013,7 @@ int otx2_open(struct net_device *netdev) } pf->flags &= ~OTX2_FLAG_INTF_DOWN; + pf->flags &= ~OTX2_FLAG_PORT_UP; /* 'intf_down' may be checked on any cpu */ smp_wmb(); @@ -2105,7 +2156,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) sq = &pf->qset.sq[sq_idx]; txq = netdev_get_tx_queue(netdev, qidx); - if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { + if (!otx2_sq_append_skb(pf, txq, sq, skb, qidx)) { netif_tx_stop_queue(txq); /* Check again, incase SQBs got freed up */ @@ -2861,6 +2912,7 @@ int otx2_realloc_msix_vectors(struct otx2_nic *pf) return otx2_register_mbox_intr(pf, false); } +EXPORT_SYMBOL(otx2_realloc_msix_vectors); static int otx2_sriov_vfcfg_init(struct otx2_nic *pf) { @@ -2976,6 +3028,7 @@ err_free_irq_vectors: return err; } +EXPORT_SYMBOL(otx2_init_rsrc); static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index e63cc1eb6d89..9a226ca74425 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -443,6 +443,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, struct flow_action_entry 
*act; struct net_device *target; struct otx2_nic *priv; + struct rep_dev *rdev; u32 burst, mark = 0; u8 nr_police = 0; u8 num_intf = 1; @@ -464,14 +465,18 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, return 0; case FLOW_ACTION_REDIRECT_INGRESS: target = act->dev; - priv = netdev_priv(target); - /* npc_install_flow_req doesn't support passing a target pcifunc */ - if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { - NL_SET_ERR_MSG_MOD(extack, - "can't redirect to other pf/vf"); - return -EOPNOTSUPP; + if (target->dev.parent) { + priv = netdev_priv(target); + if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) { + NL_SET_ERR_MSG_MOD(extack, + "can't redirect to other pf/vf"); + return -EOPNOTSUPP; + } + req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; + } else { + rdev = netdev_priv(target); + req->vf = rdev->pcifunc & RVU_PFVF_FUNC_MASK; } - req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK; /* if op is already set; avoid overwriting the same */ if (!req->op) @@ -1300,6 +1305,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, req->channel = nic->hw.rx_chan_base; req->entry = flow_cfg->flow_ent[mcam_idx]; req->intf = NIX_INTF_RX; + req->vf = nic->pcifunc; req->set_cntr = 1; new_node->entry = req->entry; @@ -1400,8 +1406,8 @@ static int otx2_tc_get_flow_stats(struct otx2_nic *nic, return 0; } -static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, - struct flow_cls_offload *cls_flower) +int otx2_setup_tc_cls_flower(struct otx2_nic *nic, + struct flow_cls_offload *cls_flower) { switch (cls_flower->command) { case FLOW_CLS_REPLACE: @@ -1414,6 +1420,7 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic, return -EOPNOTSUPP; } } +EXPORT_SYMBOL(otx2_setup_tc_cls_flower); static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic, struct tc_cls_matchall_offload *cls) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 7aaf32e9aa95..04bc06a80e23 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -376,9 +376,11 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, } otx2_set_rxhash(pfvf, cqe, skb); - skb_record_rx_queue(skb, cq->cq_idx); - if (pfvf->netdev->features & NETIF_F_RXCSUM) - skb->ip_summed = CHECKSUM_UNNECESSARY; + if (!(pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED)) { + skb_record_rx_queue(skb, cq->cq_idx); + if (pfvf->netdev->features & NETIF_F_RXCSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED) skb->mark = parse->match_id; @@ -453,6 +455,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, int tx_pkts = 0, tx_bytes = 0, qidx; struct otx2_snd_queue *sq; struct nix_cqe_tx_s *cqe; + struct net_device *ndev; int processed_cqe = 0; if (cq->pend_cqe >= budget) @@ -493,6 +496,13 @@ process_cqe: otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); +#if IS_ENABLED(CONFIG_RVU_ESWITCH) + if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED) + ndev = pfvf->reps[qidx]->netdev; + else +#endif + ndev = pfvf->netdev; + if (likely(tx_pkts)) { struct netdev_queue *txq; @@ -500,12 +510,14 @@ process_cqe: if (qidx >= pfvf->hw.tx_queues) qidx -= pfvf->hw.xdp_queues; - txq = netdev_get_tx_queue(pfvf->netdev, qidx); + if (pfvf->flags & OTX2_FLAG_REP_MODE_ENABLED) + qidx = 0; + txq = netdev_get_tx_queue(ndev, qidx); netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); /* Check if queue was stopped earlier due to ring full */ smp_mb(); if 
(netif_tx_queue_stopped(txq) && - netif_carrier_ok(pfvf->netdev)) + netif_carrier_ok(ndev)) netif_tx_wake_queue(txq); } return 0; @@ -594,6 +606,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) } return workdone; } +EXPORT_SYMBOL(otx2_napi_handler); void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) @@ -1141,13 +1154,13 @@ static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, } } -bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, +bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) { - struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx); - struct otx2_nic *pfvf = netdev_priv(netdev); int offset, num_segs, free_desc; struct nix_sqe_hdr_s *sqe_hdr; + struct otx2_nic *pfvf = dev; /* Check if there is enough room between producer * and consumer index. diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 3f1d2655ff77..e1db5f961877 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -167,7 +167,8 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr) } int otx2_napi_handler(struct napi_struct *napi, int budget); -bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, +bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, + struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index c4e6c78a8deb..839fc77c11b2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -21,6 +21,7 @@ static const struct pci_device_id otx2_vf_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) }, { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_SDP_REP) }, { } }; @@ -371,7 +372,7 @@ static int otx2vf_open(struct net_device *netdev) /* LBKs do not receive link events so tell everyone we are up here */ vf = netdev_priv(netdev); - if (is_otx2_lbkvf(vf->pdev)) { + if (is_otx2_lbkvf(vf->pdev) || is_otx2_sdp_rep(vf->pdev)) { pr_info("%s NIC Link is UP\n", netdev->name); netif_carrier_on(netdev); netif_tx_start_all_queues(netdev); @@ -395,7 +396,7 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev) sq = &vf->qset.sq[qidx]; txq = netdev_get_tx_queue(netdev, qidx); - if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) { + if (!otx2_sq_append_skb(vf, txq, sq, skb, qidx)) { netif_tx_stop_queue(txq); /* Check again, incase SQBs got freed up */ @@ -683,6 +684,15 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) snprintf(netdev->name, sizeof(netdev->name), "lbk%d", n); } + if (is_otx2_sdp_rep(vf->pdev)) { + int n; + + n = vf->pcifunc & RVU_PFVF_FUNC_MASK; + n -= 1; + snprintf(netdev->name, sizeof(netdev->name), "sdp%d-%d", + pdev->bus->number, n); + } + err = register_netdev(netdev); if (err) { dev_err(dev, "Failed to register netdevice\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.c b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c new file mode 100644 index 000000000000..ae58d0601b45 --- /dev/null +++ 
b/drivers/net/ethernet/marvell/octeontx2/nic/rep.c @@ -0,0 +1,864 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU representor driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/net_tstamp.h> +#include <linux/sort.h> + +#include "otx2_common.h" +#include "cn10k.h" +#include "otx2_reg.h" +#include "rep.h" + +#define DRV_NAME "rvu_rep" +#define DRV_STRING "Marvell RVU Representor Driver" + +static const struct pci_device_id rvu_rep_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) }, + { } +}; + +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_DESCRIPTION(DRV_STRING); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, rvu_rep_id_table); + +static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event, + struct rep_event *data); + +static int rvu_rep_mcam_flow_init(struct rep_dev *rep) +{ + struct npc_mcam_alloc_entry_req *req; + struct npc_mcam_alloc_entry_rsp *rsp; + struct otx2_nic *priv = rep->mdev; + int ent, allocated = 0; + int count; + + rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL); + + if (!rep->flow_cfg) + return -ENOMEM; + + count = OTX2_DEFAULT_FLOWCOUNT; + + rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL); + if (!rep->flow_cfg->flow_ent) + return -ENOMEM; + + while (allocated < count) { + req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox); + if (!req) + goto exit; + + req->hdr.pcifunc = rep->pcifunc; + req->contig = false; + req->ref_entry = 0; + req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ? + NPC_MAX_NONCONTIG_ENTRIES : count - allocated; + + if (otx2_sync_mbox_msg(&priv->mbox)) + goto exit; + + rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp + (&priv->mbox.mbox, 0, &req->hdr); + + for (ent = 0; ent < rsp->count; ent++) + rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; + + allocated += rsp->count; + + if (rsp->count != req->count) + break; + } +exit: + /* Multiple MCAM entry alloc requests could result in non-sequential + * MCAM entries in the flow_ent[] array. Sort them in an ascending + * order, otherwise user installed ntuple filter index and MCAM entry + * index will not be in sync. 
+ */ + if (allocated) + sort(&rep->flow_cfg->flow_ent[0], allocated, + sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL); + + mutex_unlock(&priv->mbox.lock); + + rep->flow_cfg->max_flows = allocated; + + if (allocated) { + rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; + rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; + } + + INIT_LIST_HEAD(&rep->flow_cfg->flow_list); + INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc); + return 0; +} + +static int rvu_rep_setup_tc_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct rep_dev *rep = cb_priv; + struct otx2_nic *priv = rep->mdev; + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return -EINVAL; + + if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) + rvu_rep_mcam_flow_init(rep); + + priv->netdev = rep->netdev; + priv->flags = rep->flags; + priv->pcifunc = rep->pcifunc; + priv->flow_cfg = rep->flow_cfg; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return otx2_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(rvu_rep_block_cb_list); +static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type, + void *type_data) +{ + struct rvu_rep *rep = netdev_priv(netdev); + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &rvu_rep_block_cb_list, + rvu_rep_setup_tc_cb, + rep, rep, true); + default: + return -EOPNOTSUPP; + } +} + +static int +rvu_rep_sp_stats64(const struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct otx2_rcv_queue *rq; + struct otx2_snd_queue *sq; + u16 qidx = rep->rep_id; + + otx2_update_rq_stats(priv, qidx); + rq = &priv->qset.rq[qidx]; + + otx2_update_sq_stats(priv, qidx); + sq = &priv->qset.sq[qidx]; + + stats->tx_bytes = sq->stats.bytes; + stats->tx_packets = sq->stats.pkts; + stats->rx_bytes = rq->stats.bytes; + stats->rx_packets = rq->stats.pkts; + return 0; +} + +static bool +rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id) +{ + return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT; +} + +static int +rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) +{ + if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT) + return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp); + + return -EINVAL; +} + +static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct rep_dev *rep = container_of(port, struct rep_dev, dl_port); + + ether_addr_copy(hw_addr, rep->mac); + *hw_addr_len = ETH_ALEN; + return 0; +} + +static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct rep_dev *rep = container_of(port, struct rep_dev, dl_port); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + eth_hw_addr_set(rep->netdev, hw_addr); + ether_addr_copy(rep->mac, hw_addr); + + ether_addr_copy(evt.evt_data.mac, hw_addr); + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt); + return 0; +} + +static const struct devlink_port_ops rvu_rep_dl_port_ops = { + .port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get, + .port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set, +}; + +static void +rvu_rep_devlink_set_switch_id(struct otx2_nic *priv, + struct netdev_phys_item_id *ppid) +{ + struct pci_dev *pdev = priv->pdev; + u64 id; + + id = 
pci_get_dsn(pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + +static void rvu_rep_devlink_port_unregister(struct rep_dev *rep) +{ + devlink_port_unregister(&rep->dl_port); +} + +static int rvu_rep_devlink_port_register(struct rep_dev *rep) +{ + struct devlink_port_attrs attrs = {}; + struct otx2_nic *priv = rep->mdev; + struct devlink *dl = priv->dl->dl; + int err; + + if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) { + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = rvu_get_pf(rep->pcifunc); + } else { + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; + attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc); + attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK; + } + + rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id); + devlink_port_attrs_set(&rep->dl_port, &attrs); + + err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id, + &rvu_rep_dl_port_ops); + if (err) { + dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n", + err); + return err; + } + return 0; +} + +static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc) +{ + int rep_id; + + for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) + if (priv->rep_pf_map[rep_id] == pcifunc) + return rep_id; + return -EINVAL; +} + +static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event, + struct rep_event *data) +{ + struct rep_event *req; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return -ENOMEM; + } + req->event = event; + req->pcifunc = data->pcifunc; + + memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data)); + otx2_sync_mbox_msg(&priv->mbox); + mutex_unlock(&priv->mbox.lock); + return 0; +} + +static void rvu_rep_state_evt_handler(struct otx2_nic *priv, + struct rep_event *info) +{ + struct rep_dev *rep; + int rep_id; + + rep_id = rvu_rep_get_repid(priv, info->pcifunc); + rep = priv->reps[rep_id]; + if (info->evt_data.vf_state) + rep->flags |= RVU_REP_VF_INITIALIZED; + else + rep->flags &= ~RVU_REP_VF_INITIALIZED; +} + +int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info) +{ + if (info->event & RVU_EVENT_PFVF_STATE) + rvu_rep_state_evt_handler(pf, info); + return 0; +} + +static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + netdev_info(dev, "Changing MTU from %d to %d\n", + dev->mtu, new_mtu); + dev->mtu = new_mtu; + + evt.evt_data.mtu = new_mtu; + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt); + return 0; +} + +static void rvu_rep_get_stats(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct nix_stats_req *req; + struct nix_stats_rsp *rsp; + struct rep_stats *stats; + struct otx2_nic *priv; + struct rep_dev *rep; + int err; + + rep = container_of(del_work, struct rep_dev, stats_wrk); + priv = rep->mdev; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return; + } + req->pcifunc = rep->pcifunc; + err = otx2_sync_mbox_msg_busy_poll(&priv->mbox); + if (err) + goto exit; + + rsp = (struct nix_stats_rsp *) + otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr); + + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto exit; + } + + stats = &rep->stats; + stats->rx_bytes = rsp->rx.octs; + stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast + + rsp->rx.mcast; + 
stats->rx_drops = rsp->rx.drop; + stats->rx_mcast_frames = rsp->rx.mcast; + stats->tx_bytes = rsp->tx.octs; + stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast; + stats->tx_drops = rsp->tx.drop; +exit: + mutex_unlock(&priv->mbox.lock); +} + +static void rvu_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct rep_dev *rep = netdev_priv(dev); + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return; + + stats->rx_packets = rep->stats.rx_frames; + stats->rx_bytes = rep->stats.rx_bytes; + stats->rx_dropped = rep->stats.rx_drops; + stats->multicast = rep->stats.rx_mcast_frames; + + stats->tx_packets = rep->stats.tx_frames; + stats->tx_bytes = rep->stats.tx_bytes; + stats->tx_dropped = rep->stats.tx_drops; + + schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100)); +} + +static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena) +{ + struct esw_cfg_req *req; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return -ENOMEM; + } + req->ena = ena; + otx2_sync_mbox_msg(&priv->mbox); + mutex_unlock(&priv->mbox.lock); + return 0; +} + +static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *pf = rep->mdev; + struct otx2_snd_queue *sq; + struct netdev_queue *txq; + + sq = &pf->qset.sq[rep->rep_id]; + txq = netdev_get_tx_queue(dev, 0); + + if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) { + netif_tx_stop_queue(txq); + + /* Check again, in case SQBs got freed up */ + smp_mb(); + if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb) + > sq->sqe_thresh) + netif_tx_wake_queue(txq); + + return NETDEV_TX_BUSY; + } + return NETDEV_TX_OK; +} + +static int rvu_rep_open(struct net_device *dev) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return 0; + + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + + evt.event = RVU_EVENT_PORT_STATE; + evt.evt_data.port_state = 1; + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt); + return 0; +} + +static int rvu_rep_stop(struct net_device *dev) +{ + struct rep_dev *rep = netdev_priv(dev); + struct otx2_nic *priv = rep->mdev; + struct rep_event evt = {0}; + + if (!(rep->flags & RVU_REP_VF_INITIALIZED)) + return 0; + + netif_carrier_off(dev); + netif_tx_disable(dev); + + evt.event = RVU_EVENT_PORT_STATE; + evt.pcifunc = rep->pcifunc; + rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt); + return 0; +} + +static const struct net_device_ops rvu_rep_netdev_ops = { + .ndo_open = rvu_rep_open, + .ndo_stop = rvu_rep_stop, + .ndo_start_xmit = rvu_rep_xmit, + .ndo_get_stats64 = rvu_rep_get_stats64, + .ndo_change_mtu = rvu_rep_change_mtu, + .ndo_has_offload_stats = rvu_rep_has_offload_stats, + .ndo_get_offload_stats = rvu_rep_get_offload_stats, + .ndo_setup_tc = rvu_rep_setup_tc, +}; + +static int rvu_rep_napi_init(struct otx2_nic *priv, + struct netlink_ext_ack *extack) +{ + struct otx2_qset *qset = &priv->qset; + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_hw *hw = &priv->hw; + int err = 0, qidx, vec; + char *irq_name; + + qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL); + if (!qset->napi) + return -ENOMEM; + + /* Register NAPI handler */ + for (qidx = 0; qidx < hw->cint_cnt; qidx++) { + cq_poll = &qset->napi[qidx]; + cq_poll->cint_idx = qidx; + 
cq_poll->cq_ids[CQ_RX] = + (qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ? + qidx + hw->rx_queues : + CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; + cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ; + + cq_poll->dev = (void *)priv; + netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi, + otx2_napi_handler); + napi_enable(&cq_poll->napi); + } + /* Register CQ IRQ handlers */ + vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < hw->cint_cnt; qidx++) { + irq_name = &hw->irq_name[vec * NAME_SIZE]; + + snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx); + + err = request_irq(pci_irq_vector(priv->pdev, vec), + otx2_cq_intr_handler, 0, irq_name, + &qset->napi[qidx]); + if (err) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "RVU REP IRQ registration failed for CQ%d", + qidx); + goto err_free_cints; + } + vec++; + + /* Enable CQ IRQ */ + otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0)); + otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0)); + } + priv->flags &= ~OTX2_FLAG_INTF_DOWN; + return 0; + +err_free_cints: + otx2_free_cints(priv, qidx); + otx2_disable_napi(priv); + return err; +} + +static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv) +{ + struct otx2_qset *qset = &priv->qset; + struct otx2_cq_poll *cq_poll = NULL; + int qidx, vec; + + /* Cleanup CQ NAPI and IRQ */ + vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START; + for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) { + /* Disable interrupt */ + otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0)); + + synchronize_irq(pci_irq_vector(priv->pdev, vec)); + + cq_poll = &qset->napi[qidx]; + napi_synchronize(&cq_poll->napi); + vec++; + } + otx2_free_cints(priv, priv->hw.cint_cnt); + otx2_disable_napi(priv); +} + +static void rvu_rep_rsrc_free(struct otx2_nic *priv) +{ + struct otx2_qset *qset = &priv->qset; + struct delayed_work *work; + int wrk; + + for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) { + work = &priv->refill_wrk[wrk].pool_refill_work; + cancel_delayed_work_sync(work); + } + devm_kfree(priv->dev, priv->refill_wrk); + + otx2_free_hw_resources(priv); + otx2_free_queue_mem(qset); +} + +static int rvu_rep_rsrc_init(struct otx2_nic *priv) +{ + struct otx2_qset *qset = &priv->qset; + int err; + + err = otx2_alloc_queue_mem(priv); + if (err) + return err; + + priv->hw.max_mtu = otx2_get_max_mtu(priv); + priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN; + priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM; + + err = otx2_init_hw_resources(priv); + if (err) + goto err_free_rsrc; + + /* Set maximum frame size allowed in HW */ + err = otx2_hw_set_mtu(priv, priv->hw.max_mtu); + if (err) { + dev_err(priv->dev, "Failed to set HW MTU\n"); + goto err_free_rsrc; + } + return 0; + +err_free_rsrc: + otx2_free_hw_resources(priv); + otx2_free_queue_mem(qset); + return err; +} + +void rvu_rep_destroy(struct otx2_nic *priv) +{ + struct rep_dev *rep; + int rep_id; + + rvu_eswitch_config(priv, false); + priv->flags |= OTX2_FLAG_INTF_DOWN; + rvu_rep_free_cq_rsrc(priv); + for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) { + rep = priv->reps[rep_id]; + unregister_netdev(rep->netdev); + rvu_rep_devlink_port_unregister(rep); + free_netdev(rep->netdev); + kfree(rep->flow_cfg); + } + kfree(priv->reps); + rvu_rep_rsrc_free(priv); +} + +int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack) +{ + int rep_cnt = priv->rep_cnt; + struct net_device *ndev; + struct rep_dev *rep; + int rep_id, err; + u16 pcifunc; + + err = 
rvu_rep_rsrc_init(priv); + if (err) + return -ENOMEM; + + priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL); + if (!priv->reps) + return -ENOMEM; + + for (rep_id = 0; rep_id < rep_cnt; rep_id++) { + ndev = alloc_etherdev(sizeof(*rep)); + if (!ndev) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "PFVF representor:%d creation failed", + rep_id); + err = -ENOMEM; + goto exit; + } + + rep = netdev_priv(ndev); + priv->reps[rep_id] = rep; + rep->mdev = priv; + rep->netdev = ndev; + rep->rep_id = rep_id; + + ndev->min_mtu = OTX2_MIN_MTU; + ndev->max_mtu = priv->hw.max_mtu; + ndev->netdev_ops = &rvu_rep_netdev_ops; + pcifunc = priv->rep_pf_map[rep_id]; + rep->pcifunc = pcifunc; + + snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d", + rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK)); + + ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | + NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6); + + ndev->hw_features |= NETIF_F_HW_TC; + ndev->features |= ndev->hw_features; + eth_hw_addr_random(ndev); + err = rvu_rep_devlink_port_register(rep); + if (err) + goto exit; + + SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port); + err = register_netdev(ndev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "PFVF reprentator registration failed"); + free_netdev(ndev); + goto exit; + } + + INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats); + } + err = rvu_rep_napi_init(priv, extack); + if (err) + goto exit; + + rvu_eswitch_config(priv, true); + return 0; +exit: + while (--rep_id >= 0) { + rep = priv->reps[rep_id]; + unregister_netdev(rep->netdev); + rvu_rep_devlink_port_unregister(rep); + free_netdev(rep->netdev); + } + kfree(priv->reps); + rvu_rep_rsrc_free(priv); + return err; +} + +static int rvu_get_rep_cnt(struct otx2_nic *priv) +{ + struct get_rep_cnt_rsp *rsp; + struct mbox_msghdr *msghdr; + struct msg_req *req; + int err, rep; + + mutex_lock(&priv->mbox.lock); + req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox); + if (!req) { + mutex_unlock(&priv->mbox.lock); + return -ENOMEM; + } + err = otx2_sync_mbox_msg(&priv->mbox); + if (err) + goto exit; + + msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr); + if (IS_ERR(msghdr)) { + err = PTR_ERR(msghdr); + goto exit; + } + + rsp = (struct get_rep_cnt_rsp *)msghdr; + priv->hw.tx_queues = rsp->rep_cnt; + priv->hw.rx_queues = rsp->rep_cnt; + priv->rep_cnt = rsp->rep_cnt; + for (rep = 0; rep < priv->rep_cnt; rep++) + priv->rep_pf_map[rep] = rsp->rep_pf_map[rep]; + +exit: + mutex_unlock(&priv->mbox.lock); + return err; +} + +static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct otx2_nic *priv; + struct otx2_hw *hw; + int err; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(dev, "PCI request regions failed 0x%x\n", err); + return err; + } + + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (err) { + dev_err(dev, "DMA mask config failed, abort\n"); + goto err_release_regions; + } + + pci_set_master(pdev); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + err = -ENOMEM; + goto err_release_regions; + } + + pci_set_drvdata(pdev, priv); + priv->pdev = pdev; + priv->dev = dev; + priv->flags |= OTX2_FLAG_INTF_DOWN; + priv->flags |= OTX2_FLAG_REP_MODE_ENABLED; + + hw = &priv->hw; + hw->pdev = pdev; + hw->max_queues = OTX2_MAX_CQ_CNT; + hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN; + 
hw->xqe_size = 128; + + err = otx2_init_rsrc(pdev, priv); + if (err) + goto err_release_regions; + + priv->iommu_domain = iommu_get_domain_for_dev(dev); + + err = rvu_get_rep_cnt(priv); + if (err) + goto err_detach_rsrc; + + err = otx2_register_dl(priv); + if (err) + goto err_detach_rsrc; + + return 0; + +err_detach_rsrc: + if (priv->hw.lmt_info) + free_percpu(priv->hw.lmt_info); + if (test_bit(CN10K_LMTST, &priv->hw.cap_flag)) + qmem_free(priv->dev, priv->dync_lmt); + otx2_detach_resources(&priv->mbox); + otx2_disable_mbox_intr(priv); + otx2_pfaf_mbox_destroy(priv); + pci_free_irq_vectors(pdev); +err_release_regions: + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + return err; +} + +static void rvu_rep_remove(struct pci_dev *pdev) +{ + struct otx2_nic *priv = pci_get_drvdata(pdev); + + otx2_unregister_dl(priv); + if (!(priv->flags & OTX2_FLAG_INTF_DOWN)) + rvu_rep_destroy(priv); + otx2_detach_resources(&priv->mbox); + if (priv->hw.lmt_info) + free_percpu(priv->hw.lmt_info); + if (test_bit(CN10K_LMTST, &priv->hw.cap_flag)) + qmem_free(priv->dev, priv->dync_lmt); + otx2_disable_mbox_intr(priv); + otx2_pfaf_mbox_destroy(priv); + pci_free_irq_vectors(priv->pdev); + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); +} + +static struct pci_driver rvu_rep_driver = { + .name = DRV_NAME, + .id_table = rvu_rep_id_table, + .probe = rvu_rep_probe, + .remove = rvu_rep_remove, + .shutdown = rvu_rep_remove, +}; + +static int __init rvu_rep_init_module(void) +{ + return pci_register_driver(&rvu_rep_driver); +} + +static void __exit rvu_rep_cleanup_module(void) +{ + pci_unregister_driver(&rvu_rep_driver); +} + +module_init(rvu_rep_init_module); +module_exit(rvu_rep_cleanup_module); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/rep.h b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h new file mode 100644 index 000000000000..38446b3e4f13 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/rep.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU REPRESENTOR driver + * + * Copyright (C) 2024 Marvell. 
+ * + */ + +#ifndef REP_H +#define REP_H + +#include <linux/pci.h> + +#include "otx2_reg.h" +#include "otx2_txrx.h" +#include "otx2_common.h" + +#define PCI_DEVID_RVU_REP 0xA0E0 + +#define RVU_MAX_REP OTX2_MAX_CQ_CNT + +struct rep_stats { + u64 rx_bytes; + u64 rx_frames; + u64 rx_drops; + u64 rx_mcast_frames; + + u64 tx_bytes; + u64 tx_frames; + u64 tx_drops; +}; + +struct rep_dev { + struct otx2_nic *mdev; + struct net_device *netdev; + struct rep_stats stats; + struct delayed_work stats_wrk; + struct devlink_port dl_port; + struct otx2_flow_config *flow_cfg; +#define RVU_REP_VF_INITIALIZED BIT_ULL(0) + u64 flags; + u16 rep_id; + u16 pcifunc; + u8 mac[ETH_ALEN]; +}; + +static inline bool otx2_rep_dev(struct pci_dev *pdev) +{ + return pdev->device == PCI_DEVID_RVU_REP; +} + +int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack); +void rvu_rep_destroy(struct otx2_nic *priv); +int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info); +#endif /* REP_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 4877a9d86807..a84ebac2f011 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -866,7 +866,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, return 0; err_rule: - mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh); + mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, attr, zone_rule->mh); mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); err_mod_hdr: kfree(attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index d61be26a4df1..3db31cc10719 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -660,7 +660,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, while (remaining > 0) { skb_frag_t *frag = &record->frags[i]; - get_page(skb_frag_page(frag)); + page_ref_inc(skb_frag_page(frag)); remaining -= skb_frag_size(frag); info->frags[i++] = *frag; } @@ -763,7 +763,7 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, stats = sq->stats; mlx5e_tx_dma_unmap(sq->pdev, dma); - put_page(wi->resync_dump_frag_page); + page_ref_dec(wi->resync_dump_frag_page); stats->tls_dump_packets++; stats->tls_dump_bytes += wi->num_bytes; } @@ -816,12 +816,12 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, err_out: for (; i < info.nr_frags; i++) - /* The put_page() here undoes the page ref obtained in tx_sync_info_get(). + /* The page_ref_dec() here undoes the page ref obtained in tx_sync_info_get(). * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be * released only upon their completions (or in mlx5e_free_txqsq_descs, * if channel closes). 
*/ - put_page(skb_frag_page(&info.frags[i])); + page_ref_dec(skb_frag_page(&info.frags[i])); return MLX5E_KTLS_SYNC_FAIL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2e27e9d6b820..d0b80b520397 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4295,7 +4295,8 @@ void mlx5e_set_xdp_feature(struct net_device *netdev) struct mlx5e_params *params = &priv->channels.params; xdp_features_t val; - if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) { + if (!netdev->netdev_ops->ndo_bpf || + params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) { xdp_clear_features_flag(netdev); return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 5bf8318cc48b..1d60465cc2ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -36,6 +36,7 @@ #include "en.h" #include "en/port.h" #include "eswitch.h" +#include "lib/mlx5.h" static int mlx5e_test_health_info(struct mlx5e_priv *priv) { @@ -247,6 +248,9 @@ static int mlx5e_cond_loopback(struct mlx5e_priv *priv) if (is_mdev_switchdev_mode(priv->mdev)) return -EOPNOTSUPP; + if (mlx5_get_sd(priv->mdev)) + return -EOPNOTSUPP; + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 5f1adebd9669..d6ff2dc4c19e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2544,8 +2544,11 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, u8 rep_type) { if (atomic_cmpxchg(&rep->rep_data[rep_type].state, - REP_LOADED, REP_REGISTERED) == REP_LOADED) + REP_LOADED, REP_REGISTERED) == REP_LOADED) { + if (rep_type == REP_ETH) + __esw_offloads_unload_rep(esw, rep, REP_IB); esw->offloads.rep_ops[rep_type]->unload(rep); + } } static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c2db0a1c132b..2eabfcc247c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2105,13 +2105,22 @@ lookup_fte_locked(struct mlx5_flow_group *g, fte_tmp = NULL; goto out; } + + nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); + if (!fte_tmp->node.active) { + up_write_ref_node(&fte_tmp->node, false); + + if (take_write) + up_write_ref_node(&g->node, false); + else + up_read_ref_node(&g->node); + tree_put_node(&fte_tmp->node, false); - fte_tmp = NULL; - goto out; + + return NULL; } - nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); out: if (take_write) up_write_ref_node(&g->node, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 81a9232a03e1..7db9cab9bedf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -593,9 +593,11 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) kvfree(pool); } -static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) +static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec, + bool dynamic_vec) { struct mlx5_irq_table *table = 
dev->priv.irq_table; + int sf_vec_available = sf_vec; int num_sf_ctrl; int err; @@ -616,6 +618,13 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev), MLX5_SFS_PER_CTRL_IRQ); num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl); + if (!dynamic_vec && (num_sf_ctrl + 1) > sf_vec_available) { + mlx5_core_dbg(dev, + "Not enough IRQs for SFs control and completion pool, required=%d avail=%d\n", + num_sf_ctrl + 1, sf_vec_available); + return 0; + } + table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl, "mlx5_sf_ctrl", MLX5_EQ_SHARE_IRQ_MIN_CTRL, @@ -624,9 +633,11 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) err = PTR_ERR(table->sf_ctrl_pool); goto err_pf; } - /* init sf_comp_pool */ + sf_vec_available -= num_sf_ctrl; + + /* init sf_comp_pool, remaining vectors are for the SF completions */ table->sf_comp_pool = irq_pool_alloc(dev, pcif_vec + num_sf_ctrl, - sf_vec - num_sf_ctrl, "mlx5_sf_comp", + sf_vec_available, "mlx5_sf_comp", MLX5_EQ_SHARE_IRQ_MIN_COMP, MLX5_EQ_SHARE_IRQ_MAX_COMP); if (IS_ERR(table->sf_comp_pool)) { @@ -715,6 +726,7 @@ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table) int mlx5_irq_table_create(struct mlx5_core_dev *dev) { int num_eqs = mlx5_max_eq_cap_get(dev); + bool dynamic_vec; int total_vec; int pcif_vec; int req_vec; @@ -724,21 +736,31 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev) if (mlx5_core_is_sf(dev)) return 0; + /* PCI PF vectors usage is limited by online cpus, device EQs and + * PCI MSI-X capability. + */ pcif_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1; pcif_vec = min_t(int, pcif_vec, num_eqs); + pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev)); total_vec = pcif_vec; if (mlx5_sf_max_functions(dev)) total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev); total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev)); - pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev)); req_vec = pci_msix_can_alloc_dyn(dev->pdev) ? 
1 : total_vec; n = pci_alloc_irq_vectors(dev->pdev, 1, req_vec, PCI_IRQ_MSIX); if (n < 0) return n; - err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec); + /* Further limit vectors of the pools based on platform for non dynamic case */ + dynamic_vec = pci_msix_can_alloc_dyn(dev->pdev); + if (!dynamic_vec) { + pcif_vec = min_t(int, n, pcif_vec); + total_vec = min_t(int, n, total_vec); + } + + err = irq_pools_init(dev, total_vec - pcif_vec, pcif_vec, dynamic_vec); if (err) pci_free_irq_vectors(dev->pdev); diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile index cadd4dac6620..425e8b801265 100644 --- a/drivers/net/ethernet/meta/fbnic/Makefile +++ b/drivers/net/ethernet/meta/fbnic/Makefile @@ -7,7 +7,8 @@ obj-$(CONFIG_FBNIC) += fbnic.o -fbnic-y := fbnic_devlink.o \ +fbnic-y := fbnic_csr.o \ + fbnic_devlink.o \ fbnic_ethtool.o \ fbnic_fw.o \ fbnic_hw_stats.o \ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index 9f9cb9b3e74e..98870cb2b689 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -156,6 +156,9 @@ int fbnic_alloc_irqs(struct fbnic_dev *fbd); void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version, const size_t str_sz); +void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version); +int fbnic_csr_regs_len(struct fbnic_dev *fbd); + enum fbnic_boards { fbnic_board_asic }; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c new file mode 100644 index 000000000000..2118901b25e9 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include "fbnic.h" + +#define FBNIC_BOUNDS(section) { \ + .start = FBNIC_CSR_START_##section, \ + .end = FBNIC_CSR_END_##section + 1, \ +} + +struct fbnic_csr_bounds { + u32 start; + u32 end; +}; + +static const struct fbnic_csr_bounds fbnic_csr_sects[] = { + FBNIC_BOUNDS(INTR), + FBNIC_BOUNDS(INTR_CQ), + FBNIC_BOUNDS(QM_TX), + FBNIC_BOUNDS(QM_RX), + FBNIC_BOUNDS(TCE), + FBNIC_BOUNDS(TCE_RAM), + FBNIC_BOUNDS(TMI), + FBNIC_BOUNDS(PTP), + FBNIC_BOUNDS(RXB), + FBNIC_BOUNDS(RPC), + FBNIC_BOUNDS(FAB), + FBNIC_BOUNDS(MASTER), + FBNIC_BOUNDS(PCS), + FBNIC_BOUNDS(RSFEC), + FBNIC_BOUNDS(MAC_MAC), + FBNIC_BOUNDS(SIG), + FBNIC_BOUNDS(PUL_USER), + FBNIC_BOUNDS(QUEUE), + FBNIC_BOUNDS(RPC_RAM), +}; + +#define FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY 14 +#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64 + +#define FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY 4 +#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32 + +#define FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY 9 +#define FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES 8 + +#define FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY 9 +#define FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES 8 + +#define FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY 9 +#define FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES 8 + +#define FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY 9 +#define FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES 8 + +#define FBNIC_RPC_RSS_TBL_DW_PER_ENTRY 2 +#define FBNIC_RPC_RSS_TBL_NUM_ENTRIES 256 + +static void fbnic_csr_get_regs_rpc_ram(struct fbnic_dev *fbd, u32 **data_p) +{ + u32 start = FBNIC_CSR_START_RPC_RAM; + u32 end = FBNIC_CSR_END_RPC_RAM; + u32 *data = *data_p; + u32 i, j; + + *(data++) = start; + *(data++) = end - 1; + + /* FBNIC_RPC_TCAM_ACT */ + for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_ACT_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_ACT(i, j)); + } + + /* FBNIC_RPC_TCAM_MACDA */ + for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_MACDA_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_MACDA(i, j)); + } + + /* FBNIC_RPC_TCAM_OUTER_IPSRC */ + for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPSRC_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPSRC_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(i, j)); + } + + /* FBNIC_RPC_TCAM_OUTER_IPDST */ + for (i = 0; i < FBNIC_RPC_TCAM_OUTER_IPDST_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_OUTER_IPDST_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(i, j)); + } + + /* FBNIC_RPC_TCAM_IPSRC */ + for (i = 0; i < FBNIC_RPC_TCAM_IPSRC_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_IPSRC_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPSRC(i, j)); + } + + /* FBNIC_RPC_TCAM_IPDST */ + for (i = 0; i < FBNIC_RPC_TCAM_IPDST_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_TCAM_IPDST_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_TCAM_IPDST(i, j)); + } + + /* FBNIC_RPC_RSS_TBL */ + for (i = 0; i < FBNIC_RPC_RSS_TBL_NUM_ENTRIES; i++) { + for (j = 0; j < FBNIC_RPC_RSS_TBL_DW_PER_ENTRY; j++) + *(data++) = rd32(fbd, FBNIC_RPC_RSS_TBL(i, j)); + } + + *data_p = data; +} + +void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version) +{ + const struct fbnic_csr_bounds *bound; + u32 *start = data; + int i, j; + + *regs_version = 1u; + + /* Skip RPC_RAM section which cannot be dumped linearly */ + for (i = 0, bound = fbnic_csr_sects; + i < ARRAY_SIZE(fbnic_csr_sects) - 1; i++, ++bound) { + *(data++) = bound->start; + *(data++) = bound->end - 1; + for (j = bound->start; j < bound->end; j++) + *(data++) = rd32(fbd, j); + } + 
+ /* Dump the RPC_RAM as special case registers */ + fbnic_csr_get_regs_rpc_ram(fbd, &data); + + WARN_ON(data - start != fbnic_csr_regs_len(fbd)); +} + +int fbnic_csr_regs_len(struct fbnic_dev *fbd) +{ + int i, len = 0; + + /* Dump includes start and end information of each section + * which results in an offset of 2 + */ + for (i = 0; i < ARRAY_SIZE(fbnic_csr_sects); i++) + len += fbnic_csr_sects[i].end - fbnic_csr_sects[i].start + 2; + + return len; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index dd407089ca47..f9a531ce9e17 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -665,6 +665,15 @@ enum { #define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0) #define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16) +#define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\ + (0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */ +#define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\ + (0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */ +#define FBNIC_RPC_TCAM_IPSRC(m, n)\ + (0x08c90 + 0x08 * (n) + (m)) /* 0x023240 + 32*n + 4*m */ +#define FBNIC_RPC_TCAM_IPDST(m, n)\ + (0x08cd8 + 0x08 * (n) + (m)) /* 0x023360 + 32*n + 4*m */ + #define FBNIC_RPC_RSS_TBL(n, m) \ (0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */ #define FBNIC_RPC_RSS_TBL_COUNT 2 @@ -683,6 +692,13 @@ enum { #define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */ #define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */ +/* MAC PCS registers */ +#define FBNIC_CSR_START_PCS 0x10000 /* CSR section delimiter */ +#define FBNIC_CSR_END_PCS 0x10668 /* CSR section delimiter */ + +#define FBNIC_CSR_START_RSFEC 0x10800 /* CSR section delimiter */ +#define FBNIC_CSR_END_RSFEC 0x108c8 /* CSR section delimiter */ + /* MAC MAC registers (ASIC only) */ #define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */ #define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c index 1117d5a32867..354b5397815f 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -116,8 +116,25 @@ static void fbnic_get_ts_stats(struct net_device *netdev, } } +static void fbnic_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *data) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + fbnic_csr_get_regs(fbn->fbd, data, ®s->version); +} + +static int fbnic_get_regs_len(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32); +} + static const struct ethtool_ops fbnic_ethtool_ops = { .get_drvinfo = fbnic_get_drvinfo, + .get_regs_len = fbnic_get_regs_len, + .get_regs = fbnic_get_regs, .get_ts_info = fbnic_get_ts_info, .get_ts_stats = fbnic_get_ts_stats, .get_eth_mac_stats = fbnic_get_eth_mac_stats, diff --git a/drivers/net/ethernet/microchip/lan969x/Kconfig b/drivers/net/ethernet/microchip/lan969x/Kconfig index 728180d3fa33..c5c6122ae2ec 100644 --- a/drivers/net/ethernet/microchip/lan969x/Kconfig +++ b/drivers/net/ethernet/microchip/lan969x/Kconfig @@ -1,5 +1,5 @@ config LAN969X_SWITCH - tristate "Lan969x switch driver" + bool "Lan969x switch driver" depends on SPARX5_SWITCH help This driver supports the lan969x family of network switch devices. 
diff --git a/drivers/net/ethernet/microchip/lan969x/Makefile b/drivers/net/ethernet/microchip/lan969x/Makefile index 9a2351b4f111..316405cbbc71 100644 --- a/drivers/net/ethernet/microchip/lan969x/Makefile +++ b/drivers/net/ethernet/microchip/lan969x/Makefile @@ -3,7 +3,7 @@ # Makefile for the Microchip lan969x network device drivers. # -obj-$(CONFIG_LAN969X_SWITCH) += lan969x-switch.o +obj-$(CONFIG_SPARX5_SWITCH) += lan969x-switch.o lan969x-switch-y := lan969x_regs.o lan969x.o lan969x_calendar.o \ lan969x_vcap_ag_api.o lan969x_vcap_impl.o diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 907a482c012f..739707a7b40f 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5228,6 +5228,12 @@ static int r8169_mdio_register(struct rtl8169_private *tp) phy_support_eee(tp->phydev); phy_support_asym_pause(tp->phydev); + /* mimic behavior of r8125/r8126 vendor drivers */ + if (tp->mac_version == RTL_GIGA_MAC_VER_61) + phy_set_eee_broken(tp->phydev, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT); + phy_set_eee_broken(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT); + /* PHY will be woken up in rtl_open() */ phy_suspend(tp->phydev); diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 1d5b33f6c4b5..5307c6ff4e25 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -96,15 +96,7 @@ static void rtl8125_common_config_eee_phy(struct phy_device *phydev) phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); } -static void rtl8125a_config_eee_phy(struct phy_device *phydev) -{ - rtl8168g_config_eee_phy(phydev); - /* disable EEE at 2.5Gbps */ - phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); - rtl8125_common_config_eee_phy(phydev); -} - -static void rtl8125b_config_eee_phy(struct phy_device *phydev) +static void rtl8125_config_eee_phy(struct phy_device *phydev) { rtl8168g_config_eee_phy(phydev); rtl8125_common_config_eee_phy(phydev); @@ -1066,7 +1058,7 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, rtl8168g_enable_gphy_10m(phydev); rtl8168g_disable_aldps(phydev); - rtl8125a_config_eee_phy(phydev); + rtl8125_config_eee_phy(phydev); } static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, @@ -1106,7 +1098,7 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, rtl8125_legacy_force_mode(phydev); rtl8168g_disable_aldps(phydev); - rtl8125b_config_eee_phy(phydev); + rtl8125_config_eee_phy(phydev); } static void rtl8125d_hw_phy_config(struct rtl8169_private *tp, @@ -1116,7 +1108,7 @@ static void rtl8125d_hw_phy_config(struct rtl8169_private *tp, rtl8168g_enable_gphy_10m(phydev); rtl8125_legacy_force_mode(phydev); rtl8168g_disable_aldps(phydev); - rtl8125b_config_eee_phy(phydev); + rtl8125_config_eee_phy(phydev); } static void rtl8126a_hw_phy_config(struct rtl8169_private *tp, diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c index 5c2551369812..6c3b74000d3b 100644 --- a/drivers/net/ethernet/sfc/ef100_ethtool.c +++ b/drivers/net/ethernet/sfc/ef100_ethtool.c @@ -59,6 +59,7 @@ const struct ethtool_ops ef100_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, .rxfh_per_ctx_key = true, + .cap_rss_rxnfc_adds = true, .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = 
efx_ethtool_set_rxfh, diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index bb1930818beb..83d715544f7f 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -263,6 +263,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, .rxfh_per_ctx_key = true, + .cap_rss_rxnfc_adds = true, .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 684489156dce..1367fa5c9b8e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -549,8 +549,12 @@ extern const struct stmmac_desc_ops ndesc_ops; struct mac_device_info; extern const struct stmmac_hwtimestamp stmmac_ptp; +extern const struct stmmac_hwtimestamp dwmac1000_ptp; extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; +extern const struct ptp_clock_info stmmac_ptp_clock_ops; +extern const struct ptp_clock_info dwmac1000_ptp_clock_ops; + struct mac_link { u32 caps; u32 speed_mask; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index 230e79658c54..a433526dcbe8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -108,7 +108,12 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (IS_ERR(dwmac->tx_clk)) return PTR_ERR(dwmac->tx_clk); - clk_prepare_enable(dwmac->tx_clk); + ret = clk_prepare_enable(dwmac->tx_clk); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable tx_clk\n"); + return ret; + } /* Check and configure TX clock rate */ rate = clk_get_rate(dwmac->tx_clk); @@ -119,7 +124,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to set tx_clk\n"); - return ret; + goto err_tx_clk_disable; } } } @@ -133,7 +138,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to set clk_ptp_ref\n"); - return ret; + goto err_tx_clk_disable; } } } @@ -149,12 +154,15 @@ static int intel_eth_plat_probe(struct platform_device *pdev) } ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); - if (ret) { - clk_disable_unprepare(dwmac->tx_clk); - return ret; - } + if (ret) + goto err_tx_clk_disable; return 0; + +err_tx_clk_disable: + if (dwmac->data->tx_clk_en) + clk_disable_unprepare(dwmac->tx_clk); + return ret; } static void intel_eth_plat_remove(struct platform_device *pdev) @@ -162,7 +170,8 @@ static void intel_eth_plat_remove(struct platform_device *pdev) struct intel_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev); stmmac_pltfr_remove(pdev); - clk_disable_unprepare(dwmac->tx_clk); + if (dwmac->data->tx_clk_en) + clk_disable_unprepare(dwmac->tx_clk); } static struct platform_driver intel_eth_plat_driver = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index f8ca81675407..c9636832a570 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -589,9 +589,9 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev, plat->mac_interface = priv_plat->phy_mode; if (priv_plat->mac_wol) - plat->flags |= STMMAC_FLAG_USE_PHY_WOL; - 
else plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL; + else + plat->flags |= STMMAC_FLAG_USE_PHY_WOL; plat->riwt_off = 1; plat->maxmtu = ETH_DATA_LEN; plat->host_dma_width = priv_plat->variant->dma_bit_mask; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 0745117d5872..248b30d7b864 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -485,6 +485,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->pcs_init = socfpga_dwmac_pcs_init; plat_dat->pcs_exit = socfpga_dwmac_pcs_exit; plat_dat->select_pcs = socfpga_dwmac_select_pcs; + plat_dat->has_gmac = true; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 4296ddda8aaa..600fea8f712f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -329,5 +329,17 @@ enum rtc_control { #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 #define GMAC_EXTHASH_BASE 0x500 +/* PTP and timestamping registers */ + +#define GMAC3_X_ATSNS GENMASK(19, 16) +#define GMAC3_X_ATSNS_SHIFT 16 + +#define GMAC_PTP_TCR_ATSFC BIT(24) +#define GMAC_PTP_TCR_ATSEN0 BIT(25) + +#define GMAC3_X_TIMESTAMP_STATUS 0x28 +#define GMAC_PTP_ATNR 0x30 +#define GMAC_PTP_ATSR 0x34 + extern const struct stmmac_dma_ops dwmac1000_dma_ops; #endif /* __DWMAC1000_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index d413d76a8936..96bcda0856ec 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -18,6 +18,7 @@ #include <linux/io.h> #include "stmmac.h" #include "stmmac_pcs.h" +#include "stmmac_ptp.h" #include "dwmac1000.h" static void dwmac1000_core_init(struct mac_device_info *hw, @@ -551,3 +552,103 @@ int dwmac1000_setup(struct stmmac_priv *priv) return 0; } + +/* DWMAC 1000 HW Timestaming ops */ + +void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time) +{ + u64 ns; + + ns = readl(ptpaddr + GMAC_PTP_ATNR); + ns += readl(ptpaddr + GMAC_PTP_ATSR) * NSEC_PER_SEC; + + *ptp_time = ns; +} + +void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv) +{ + struct ptp_clock_event event; + u32 ts_status, num_snapshot; + unsigned long flags; + u64 ptp_time; + int i; + + /* Clears the timestamp interrupt */ + ts_status = readl(priv->ptpaddr + GMAC3_X_TIMESTAMP_STATUS); + + if (!(priv->plat->flags & STMMAC_FLAG_EXT_SNAPSHOT_EN)) + return; + + num_snapshot = (ts_status & GMAC3_X_ATSNS) >> GMAC3_X_ATSNS_SHIFT; + + for (i = 0; i < num_snapshot; i++) { + read_lock_irqsave(&priv->ptp_lock, flags); + stmmac_get_ptptime(priv, priv->ptpaddr, &ptp_time); + read_unlock_irqrestore(&priv->ptp_lock, flags); + + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ptp_time; + ptp_clock_event(priv->ptp_clock, &event); + } +} + +/* DWMAC 1000 ptp_clock_info ops */ + +static void dwmac1000_timestamp_interrupt_cfg(struct stmmac_priv *priv, bool en) +{ + void __iomem *ioaddr = priv->ioaddr; + + u32 intr_mask = readl(ioaddr + GMAC_INT_MASK); + + if (en) + intr_mask &= ~GMAC_INT_DISABLE_TIMESTAMP; + else + intr_mask |= GMAC_INT_DISABLE_TIMESTAMP; + + writel(intr_mask, ioaddr + GMAC_INT_MASK); +} + +int dwmac1000_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct 
stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + void __iomem *ptpaddr = priv->ptpaddr; + int ret = -EOPNOTSUPP; + u32 tcr_val; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + mutex_lock(&priv->aux_ts_lock); + tcr_val = readl(ptpaddr + PTP_TCR); + + if (on) { + tcr_val |= GMAC_PTP_TCR_ATSEN0; + tcr_val |= GMAC_PTP_TCR_ATSFC; + priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN; + } else { + tcr_val &= ~GMAC_PTP_TCR_ATSEN0; + priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN; + } + + netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n", + on ? "enabled" : "disabled"); + writel(tcr_val, ptpaddr + PTP_TCR); + + /* wait for auxts fifo clear to finish */ + ret = readl_poll_timeout(ptpaddr + PTP_TCR, tcr_val, + !(tcr_val & GMAC_PTP_TCR_ATSFC), + 10, 10000); + + mutex_unlock(&priv->aux_ts_lock); + + dwmac1000_timestamp_interrupt_cfg(priv, on); + break; + + default: + break; + } + + return ret; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index cfc50289aed6..a72d336a8350 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -113,6 +113,7 @@ static const struct stmmac_hwif_entry { const void *dma; const void *mac; const void *hwtimestamp; + const void *ptp; const void *mode; const void *tc; const void *mmc; @@ -133,7 +134,8 @@ static const struct stmmac_hwif_entry { .desc = NULL, .dma = &dwmac100_dma_ops, .mac = &dwmac100_ops, - .hwtimestamp = &stmmac_ptp, + .hwtimestamp = &dwmac1000_ptp, + .ptp = &dwmac1000_ptp_clock_ops, .mode = NULL, .tc = NULL, .mmc = &dwmac_mmc_ops, @@ -151,7 +153,8 @@ static const struct stmmac_hwif_entry { .desc = NULL, .dma = &dwmac1000_dma_ops, .mac = &dwmac1000_ops, - .hwtimestamp = &stmmac_ptp, + .hwtimestamp = &dwmac1000_ptp, + .ptp = &dwmac1000_ptp_clock_ops, .mode = NULL, .tc = NULL, .mmc = &dwmac_mmc_ops, @@ -171,6 +174,7 @@ static const struct stmmac_hwif_entry { .dma = &dwmac4_dma_ops, .mac = &dwmac4_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = NULL, .tc = &dwmac4_tc_ops, .mmc = &dwmac_mmc_ops, @@ -192,6 +196,7 @@ static const struct stmmac_hwif_entry { .dma = &dwmac4_dma_ops, .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = &dwmac4_ring_mode_ops, .tc = &dwmac510_tc_ops, .mmc = &dwmac_mmc_ops, @@ -213,6 +218,7 @@ static const struct stmmac_hwif_entry { .dma = &dwmac410_dma_ops, .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = &dwmac4_ring_mode_ops, .tc = &dwmac510_tc_ops, .mmc = &dwmac_mmc_ops, @@ -234,6 +240,7 @@ static const struct stmmac_hwif_entry { .dma = &dwmac410_dma_ops, .mac = &dwmac510_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = &dwmac4_ring_mode_ops, .tc = &dwmac510_tc_ops, .mmc = &dwmac_mmc_ops, @@ -256,6 +263,7 @@ static const struct stmmac_hwif_entry { .dma = &dwxgmac210_dma_ops, .mac = &dwxgmac210_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = NULL, .tc = &dwxgmac_tc_ops, .mmc = &dwxgmac_mmc_ops, @@ -278,6 +286,7 @@ static const struct stmmac_hwif_entry { .dma = &dwxgmac210_dma_ops, .mac = &dwxlgmac2_ops, .hwtimestamp = &stmmac_ptp, + .ptp = &stmmac_ptp_clock_ops, .mode = NULL, .tc = &dwxgmac_tc_ops, .mmc = &dwxgmac_mmc_ops, @@ -362,6 +371,8 @@ int stmmac_hwif_init(struct stmmac_priv *priv) priv->fpe_cfg.reg = entry->regs.fpe_reg; priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; + 
memcpy(&priv->ptp_clock_ops, entry->ptp, + sizeof(struct ptp_clock_info)); if (entry->est) priv->estaddr = priv->ioaddr + entry->regs.est_off; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 5ef52ef2698f..0f59aa982604 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -18,9 +18,22 @@ #include "dwmac4.h" #include "stmmac.h" +#define STMMAC_HWTS_CFG_MASK (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \ + PTP_TCR_TSINIT | PTP_TCR_TSUPDT | \ + PTP_TCR_TSCTRLSSR | PTP_TCR_SNAPTYPSEL_1 | \ + PTP_TCR_TSIPV4ENA | PTP_TCR_TSIPV6ENA | \ + PTP_TCR_TSEVNTENA | PTP_TCR_TSMSTRENA | \ + PTP_TCR_TSVER2ENA | PTP_TCR_TSIPENA | \ + PTP_TCR_TSTRIG | PTP_TCR_TSENALL) + static void config_hw_tstamping(void __iomem *ioaddr, u32 data) { - writel(data, ioaddr + PTP_TCR); + u32 regval = readl(ioaddr + PTP_TCR); + + regval &= ~STMMAC_HWTS_CFG_MASK; + regval |= data; + + writel(regval, ioaddr + PTP_TCR); } static void config_sub_second_increment(void __iomem *ioaddr, @@ -269,3 +282,14 @@ const struct stmmac_hwtimestamp stmmac_ptp = { .timestamp_interrupt = timestamp_interrupt, .hwtstamp_correct_latency = hwtstamp_correct_latency, }; + +const struct stmmac_hwtimestamp dwmac1000_ptp = { + .config_hw_tstamping = config_hw_tstamping, + .init_systime = init_systime, + .config_sub_second_increment = config_sub_second_increment, + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, + .get_ptptime = dwmac1000_get_ptptime, + .timestamp_interrupt = dwmac1000_timestamp_interrupt, +}; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index a6b1de9a251d..429b2d357813 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -9,7 +9,6 @@ *******************************************************************************/ #include "stmmac.h" #include "stmmac_ptp.h" -#include "dwmac4.h" /** * stmmac_adjust_freq @@ -265,7 +264,7 @@ static int stmmac_getcrosststamp(struct ptp_clock_info *ptp, } /* structure describing a PTP hardware clock */ -static struct ptp_clock_info stmmac_ptp_clock_ops = { +const struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac ptp", .max_adj = 62500000, @@ -282,6 +281,24 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = { .getcrosststamp = stmmac_getcrosststamp, }; +/* structure describing a PTP hardware clock */ +const struct ptp_clock_info dwmac1000_ptp_clock_ops = { + .owner = THIS_MODULE, + .name = "stmmac ptp", + .max_adj = 62500000, + .n_alarm = 0, + .n_ext_ts = 1, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfine = stmmac_adjust_freq, + .adjtime = stmmac_adjust_time, + .gettime64 = stmmac_get_time, + .settime64 = stmmac_set_time, + .enable = dwmac1000_ptp_enable, + .getcrosststamp = stmmac_getcrosststamp, +}; + /** * stmmac_ptp_register * @priv: driver private structure @@ -298,20 +315,25 @@ void stmmac_ptp_register(struct stmmac_priv *priv) priv->pps[i].available = true; } - if (priv->plat->ptp_max_adj) - stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; - /* Calculate the clock domain crossing (CDC) error if necessary */ priv->plat->cdc_error_adj = 0; if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate; - stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; 
- stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; + /* Update the ptp clock parameters based on feature discovery, when + * available + */ + if (priv->dma_cap.pps_out_num) + priv->ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + + if (priv->dma_cap.aux_snapshot_n) + priv->ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n; + + if (priv->plat->ptp_max_adj) + priv->ptp_clock_ops.max_adj = priv->plat->ptp_max_adj; rwlock_init(&priv->ptp_lock); mutex_init(&priv->aux_ts_lock); - priv->ptp_clock_ops = stmmac_ptp_clock_ops; priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops, priv->device); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index fce3fba2ffd2..4cc70480ce0f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -94,4 +94,14 @@ enum aux_snapshot { AUX_SNAPSHOT3 = 0x80, }; +struct ptp_clock_info; +struct ptp_clock_request; +struct stmmac_priv; + +int dwmac1000_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on); + +void dwmac1000_get_ptptime(void __iomem *ptpaddr, u64 *ptp_time); +void dwmac1000_timestamp_interrupt(struct stmmac_priv *priv); + #endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index b4d70c6e0cff..c568c84a032b 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -16,6 +16,7 @@ #include <linux/if_hsr.h> #include <linux/if_vlan.h> #include <linux/interrupt.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> @@ -411,6 +412,8 @@ static int prueth_perout_enable(void *clockops_data, struct prueth_emac *emac = clockops_data; u32 reduction_factor = 0, offset = 0; struct timespec64 ts; + u64 current_cycle; + u64 start_offset; u64 ns_period; if (!on) @@ -449,8 +452,14 @@ static int prueth_perout_enable(void *clockops_data, writel(reduction_factor, emac->prueth->shram.va + TIMESYNC_FW_WC_SYNCOUT_REDUCTION_FACTOR_OFFSET); - writel(0, emac->prueth->shram.va + - TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET); + current_cycle = icssg_read_time(emac->prueth->shram.va + + TIMESYNC_FW_WC_CYCLECOUNT_OFFSET); + + /* Rounding of current_cycle count to next second */ + start_offset = roundup(current_cycle, MSEC_PER_SEC); + + hi_lo_writeq(start_offset, emac->prueth->shram.va + + TIMESYNC_FW_WC_SYNCOUT_START_TIME_CYCLECOUNT_OFFSET); return 0; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index 8722bb4a268a..f5c1d473e9f9 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -330,6 +330,18 @@ static inline int prueth_emac_slice(struct prueth_emac *emac) extern const struct ethtool_ops icssg_ethtool_ops; extern const struct dev_pm_ops prueth_dev_pm_ops; +static inline u64 icssg_read_time(const void __iomem *addr) +{ + u32 low, high; + + do { + high = readl(addr + 4); + low = readl(addr); + } while (high != readl(addr + 4)); + + return low + ((u64)high << 32); +} + /* Classifier helpers */ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac); void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac); diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index 2c37957478fb..89dc4c401a8d 100644 --- 
a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -437,13 +437,15 @@ static void mse102x_tx_work(struct work_struct *work) mse = &mses->mse102x; while ((txb = skb_dequeue(&mse->txq))) { + unsigned int len = max_t(unsigned int, txb->len, ETH_ZLEN); + mutex_lock(&mses->lock); ret = mse102x_tx_pkt_spi(mse, txb, work_timeout); mutex_unlock(&mses->lock); if (ret) { mse->ndev->stats.tx_dropped++; } else { - mse->ndev->stats.tx_bytes += txb->len; + mse->ndev->stats.tx_bytes += len; mse->ndev->stats.tx_packets++; } diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c index e08c90ac0c6e..f67a4d4005e7 100644 --- a/drivers/net/mdio.c +++ b/drivers/net/mdio.c @@ -167,178 +167,6 @@ static u32 mdio45_get_an(const struct mdio_if_info *mdio, u16 addr) } /** - * mdio45_ethtool_gset_npage - get settings for ETHTOOL_GSET - * @mdio: MDIO interface - * @ecmd: Ethtool request structure - * @npage_adv: Modes currently advertised on next pages - * @npage_lpa: Modes advertised by link partner on next pages - * - * The @ecmd parameter is expected to have been cleared before calling - * mdio45_ethtool_gset_npage(). - * - * Since the CSRs for auto-negotiation using next pages are not fully - * standardised, this function does not attempt to decode them. The - * caller must pass them in. - */ -void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, - struct ethtool_cmd *ecmd, - u32 npage_adv, u32 npage_lpa) -{ - int reg; - u32 speed; - - BUILD_BUG_ON(MDIO_SUPPORTS_C22 != ETH_MDIO_SUPPORTS_C22); - BUILD_BUG_ON(MDIO_SUPPORTS_C45 != ETH_MDIO_SUPPORTS_C45); - - ecmd->transceiver = XCVR_INTERNAL; - ecmd->phy_address = mdio->prtad; - ecmd->mdio_support = - mdio->mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22); - - reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, - MDIO_CTRL2); - switch (reg & MDIO_PMA_CTRL2_TYPE) { - case MDIO_PMA_CTRL2_10GBT: - case MDIO_PMA_CTRL2_1000BT: - case MDIO_PMA_CTRL2_100BTX: - case MDIO_PMA_CTRL2_10BT: - ecmd->port = PORT_TP; - ecmd->supported = SUPPORTED_TP; - reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, - MDIO_SPEED); - if (reg & MDIO_SPEED_10G) - ecmd->supported |= SUPPORTED_10000baseT_Full; - if (reg & MDIO_PMA_SPEED_1000) - ecmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_1000baseT_Half); - if (reg & MDIO_PMA_SPEED_100) - ecmd->supported |= (SUPPORTED_100baseT_Full | - SUPPORTED_100baseT_Half); - if (reg & MDIO_PMA_SPEED_10) - ecmd->supported |= (SUPPORTED_10baseT_Full | - SUPPORTED_10baseT_Half); - ecmd->advertising = ADVERTISED_TP; - break; - - case MDIO_PMA_CTRL2_10GBCX4: - ecmd->port = PORT_OTHER; - ecmd->supported = 0; - ecmd->advertising = 0; - break; - - case MDIO_PMA_CTRL2_10GBKX4: - case MDIO_PMA_CTRL2_10GBKR: - case MDIO_PMA_CTRL2_1000BKX: - ecmd->port = PORT_OTHER; - ecmd->supported = SUPPORTED_Backplane; - reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, - MDIO_PMA_EXTABLE); - if (reg & MDIO_PMA_EXTABLE_10GBKX4) - ecmd->supported |= SUPPORTED_10000baseKX4_Full; - if (reg & MDIO_PMA_EXTABLE_10GBKR) - ecmd->supported |= SUPPORTED_10000baseKR_Full; - if (reg & MDIO_PMA_EXTABLE_1000BKX) - ecmd->supported |= SUPPORTED_1000baseKX_Full; - reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, - MDIO_PMA_10GBR_FECABLE); - if (reg & MDIO_PMA_10GBR_FECABLE_ABLE) - ecmd->supported |= SUPPORTED_10000baseR_FEC; - ecmd->advertising = ADVERTISED_Backplane; - break; - - /* All the other defined modes are flavours of optical */ - default: - ecmd->port = PORT_FIBRE; - 
ecmd->supported = SUPPORTED_FIBRE; - ecmd->advertising = ADVERTISED_FIBRE; - break; - } - - if (mdio->mmds & MDIO_DEVS_AN) { - ecmd->supported |= SUPPORTED_Autoneg; - reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, - MDIO_CTRL1); - if (reg & MDIO_AN_CTRL1_ENABLE) { - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->advertising |= - ADVERTISED_Autoneg | - mdio45_get_an(mdio, MDIO_AN_ADVERTISE) | - npage_adv; - } else { - ecmd->autoneg = AUTONEG_DISABLE; - } - } else { - ecmd->autoneg = AUTONEG_DISABLE; - } - - if (ecmd->autoneg) { - u32 modes = 0; - int an_stat = mdio->mdio_read(mdio->dev, mdio->prtad, - MDIO_MMD_AN, MDIO_STAT1); - - /* If AN is complete and successful, report best common - * mode, otherwise report best advertised mode. */ - if (an_stat & MDIO_AN_STAT1_COMPLETE) { - ecmd->lp_advertising = - mdio45_get_an(mdio, MDIO_AN_LPA) | npage_lpa; - if (an_stat & MDIO_AN_STAT1_LPABLE) - ecmd->lp_advertising |= ADVERTISED_Autoneg; - modes = ecmd->advertising & ecmd->lp_advertising; - } - if ((modes & ~ADVERTISED_Autoneg) == 0) - modes = ecmd->advertising; - - if (modes & (ADVERTISED_10000baseT_Full | - ADVERTISED_10000baseKX4_Full | - ADVERTISED_10000baseKR_Full)) { - speed = SPEED_10000; - ecmd->duplex = DUPLEX_FULL; - } else if (modes & (ADVERTISED_1000baseT_Full | - ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseKX_Full)) { - speed = SPEED_1000; - ecmd->duplex = !(modes & ADVERTISED_1000baseT_Half); - } else if (modes & (ADVERTISED_100baseT_Full | - ADVERTISED_100baseT_Half)) { - speed = SPEED_100; - ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full); - } else { - speed = SPEED_10; - ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full); - } - } else { - /* Report forced settings */ - reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, - MDIO_CTRL1); - speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1) - * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10)); - ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX || - speed == SPEED_10000); - } - - ethtool_cmd_speed_set(ecmd, speed); - - /* 10GBASE-T MDI/MDI-X */ - if (ecmd->port == PORT_TP - && (ethtool_cmd_speed(ecmd) == SPEED_10000)) { - switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD, - MDIO_PMA_10GBT_SWAPPOL)) { - case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX: - ecmd->eth_tp_mdix = ETH_TP_MDI; - break; - case 0: - ecmd->eth_tp_mdix = ETH_TP_MDI_X; - break; - default: - /* It's complicated... */ - ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; - break; - } - } -} -EXPORT_SYMBOL(mdio45_ethtool_gset_npage); - -/** * mdio45_ethtool_ksettings_get_npage - get settings for ETHTOOL_GLINKSETTINGS * @mdio: MDIO interface * @cmd: Ethtool request structure diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index ee3ea0b56d48..15828f4710a9 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -266,22 +266,7 @@ config MAXLINEAR_GPHY Support for the Maxlinear GPY115, GPY211, GPY212, GPY215, GPY241, GPY245 PHYs. -config MEDIATEK_GE_PHY - tristate "MediaTek Gigabit Ethernet PHYs" - help - Supports the MediaTek Gigabit Ethernet PHYs. - -config MEDIATEK_GE_SOC_PHY - tristate "MediaTek SoC Ethernet PHYs" - depends on (ARM64 && ARCH_MEDIATEK) || COMPILE_TEST - depends on NVMEM_MTK_EFUSE - help - Supports MediaTek SoC built-in Gigabit Ethernet PHYs. - - Include support for built-in Ethernet PHYs which are present in - the MT7981 and MT7988 SoCs. These PHYs need calibration data - present in the SoCs efuse and will dynamically calibrate VCM - (common-mode voltage) during startup. 
+source "drivers/net/phy/mediatek/Kconfig" config MICREL_PHY tristate "Micrel PHYs" diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 90f886844381..e6145153e837 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -74,8 +74,7 @@ obj-$(CONFIG_MARVELL_PHY) += marvell.o obj-$(CONFIG_MARVELL_88Q2XXX_PHY) += marvell-88q2xxx.o obj-$(CONFIG_MARVELL_88X2222_PHY) += marvell-88x2222.o obj-$(CONFIG_MAXLINEAR_GPHY) += mxl-gpy.o -obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o -obj-$(CONFIG_MEDIATEK_GE_SOC_PHY) += mediatek-ge-soc.o +obj-y += mediatek/ obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_MICREL_PHY) += micrel.o diff --git a/drivers/net/phy/mediatek/Kconfig b/drivers/net/phy/mediatek/Kconfig new file mode 100644 index 000000000000..2a8ac5aed0f8 --- /dev/null +++ b/drivers/net/phy/mediatek/Kconfig @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0-only +config MTK_NET_PHYLIB + tristate + +config MEDIATEK_GE_PHY + tristate "MediaTek Gigabit Ethernet PHYs" + select MTK_NET_PHYLIB + help + Supports the MediaTek non-built-in Gigabit Ethernet PHYs. + + Non-built-in Gigabit Ethernet PHYs include mt7530/mt7531. + You may find mt7530 inside mt7621. This driver shares some + common operations with MediaTek SoC built-in Gigabit + Ethernet PHYs. + +config MEDIATEK_GE_SOC_PHY + tristate "MediaTek SoC Ethernet PHYs" + depends on (ARM64 && ARCH_MEDIATEK) || COMPILE_TEST + depends on NVMEM_MTK_EFUSE + select MTK_NET_PHYLIB + help + Supports MediaTek SoC built-in Gigabit Ethernet PHYs. + + Include support for built-in Ethernet PHYs which are present in + the MT7981 and MT7988 SoCs. These PHYs need calibration data + present in the SoCs efuse and will dynamically calibrate VCM + (common-mode voltage) during startup. 
diff --git a/drivers/net/phy/mediatek/Makefile b/drivers/net/phy/mediatek/Makefile new file mode 100644 index 000000000000..814879d0abe5 --- /dev/null +++ b/drivers/net/phy/mediatek/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_MTK_NET_PHYLIB) += mtk-phy-lib.o +obj-$(CONFIG_MEDIATEK_GE_PHY) += mtk-ge.o +obj-$(CONFIG_MEDIATEK_GE_SOC_PHY) += mtk-ge-soc.o diff --git a/drivers/net/phy/mediatek-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c index a931832b1418..38dc898eaf7b 100644 --- a/drivers/net/phy/mediatek-ge-soc.c +++ b/drivers/net/phy/mediatek/mtk-ge-soc.c @@ -8,6 +8,8 @@ #include <linux/phy.h> #include <linux/regmap.h> +#include "mtk.h" + #define MTK_GPHY_ID_MT7981 0x03a29461 #define MTK_GPHY_ID_MT7988 0x03a29481 @@ -210,41 +212,6 @@ #define MTK_PHY_DA_TX_R50_PAIR_D 0x540 /* Registers on MDIO_MMD_VEND2 */ -#define MTK_PHY_LED0_ON_CTRL 0x24 -#define MTK_PHY_LED1_ON_CTRL 0x26 -#define MTK_PHY_LED_ON_MASK GENMASK(6, 0) -#define MTK_PHY_LED_ON_LINK1000 BIT(0) -#define MTK_PHY_LED_ON_LINK100 BIT(1) -#define MTK_PHY_LED_ON_LINK10 BIT(2) -#define MTK_PHY_LED_ON_LINK (MTK_PHY_LED_ON_LINK10 |\ - MTK_PHY_LED_ON_LINK100 |\ - MTK_PHY_LED_ON_LINK1000) -#define MTK_PHY_LED_ON_LINKDOWN BIT(3) -#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */ -#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */ -#define MTK_PHY_LED_ON_FORCE_ON BIT(6) -#define MTK_PHY_LED_ON_POLARITY BIT(14) -#define MTK_PHY_LED_ON_ENABLE BIT(15) - -#define MTK_PHY_LED0_BLINK_CTRL 0x25 -#define MTK_PHY_LED1_BLINK_CTRL 0x27 -#define MTK_PHY_LED_BLINK_1000TX BIT(0) -#define MTK_PHY_LED_BLINK_1000RX BIT(1) -#define MTK_PHY_LED_BLINK_100TX BIT(2) -#define MTK_PHY_LED_BLINK_100RX BIT(3) -#define MTK_PHY_LED_BLINK_10TX BIT(4) -#define MTK_PHY_LED_BLINK_10RX BIT(5) -#define MTK_PHY_LED_BLINK_RX (MTK_PHY_LED_BLINK_10RX |\ - MTK_PHY_LED_BLINK_100RX |\ - MTK_PHY_LED_BLINK_1000RX) -#define MTK_PHY_LED_BLINK_TX (MTK_PHY_LED_BLINK_10TX |\ - MTK_PHY_LED_BLINK_100TX |\ - MTK_PHY_LED_BLINK_1000TX) -#define MTK_PHY_LED_BLINK_COLLISION BIT(6) -#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7) -#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8) -#define MTK_PHY_LED_BLINK_FORCE_BLINK BIT(9) - #define MTK_PHY_LED1_DEFAULT_POLARITIES BIT(1) #define MTK_PHY_RG_BG_RASEL 0x115 @@ -299,29 +266,11 @@ enum CAL_MODE { SW_M }; -#define MTK_PHY_LED_STATE_FORCE_ON 0 -#define MTK_PHY_LED_STATE_FORCE_BLINK 1 -#define MTK_PHY_LED_STATE_NETDEV 2 - -struct mtk_socphy_priv { - unsigned long led_state; -}; - struct mtk_socphy_shared { u32 boottrap; struct mtk_socphy_priv priv[4]; }; -static int mtk_socphy_read_page(struct phy_device *phydev) -{ - return __phy_read(phydev, MTK_EXT_PAGE_ACCESS); -} - -static int mtk_socphy_write_page(struct phy_device *phydev, int page) -{ - return __phy_write(phydev, MTK_EXT_PAGE_ACCESS, page); -} - /* One calibration cycle consists of: * 1.Set DA_CALIN_FLAG high to start calibration. Keep it high * until AD_CAL_COMP is ready to output calibration result. @@ -1172,76 +1121,23 @@ static int mt798x_phy_config_init(struct phy_device *phydev) return mt798x_phy_calibration(phydev); } -static int mt798x_phy_hw_led_on_set(struct phy_device *phydev, u8 index, - bool on) -{ - unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0); - struct mtk_socphy_priv *priv = phydev->priv; - bool changed; - - if (on) - changed = !test_and_set_bit(bit_on, &priv->led_state); - else - changed = !!test_and_clear_bit(bit_on, &priv->led_state); - - changed |= !!test_and_clear_bit(MTK_PHY_LED_STATE_NETDEV + - (index ? 
16 : 0), &priv->led_state); - if (changed) - return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ? - MTK_PHY_LED1_ON_CTRL : - MTK_PHY_LED0_ON_CTRL, - MTK_PHY_LED_ON_MASK, - on ? MTK_PHY_LED_ON_FORCE_ON : 0); - else - return 0; -} - -static int mt798x_phy_hw_led_blink_set(struct phy_device *phydev, u8 index, - bool blinking) -{ - unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + - (index ? 16 : 0); - struct mtk_socphy_priv *priv = phydev->priv; - bool changed; - - if (blinking) - changed = !test_and_set_bit(bit_blink, &priv->led_state); - else - changed = !!test_and_clear_bit(bit_blink, &priv->led_state); - - changed |= !!test_bit(MTK_PHY_LED_STATE_NETDEV + - (index ? 16 : 0), &priv->led_state); - if (changed) - return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ? - MTK_PHY_LED1_BLINK_CTRL : - MTK_PHY_LED0_BLINK_CTRL, - blinking ? - MTK_PHY_LED_BLINK_FORCE_BLINK : 0); - else - return 0; -} - static int mt798x_phy_led_blink_set(struct phy_device *phydev, u8 index, unsigned long *delay_on, unsigned long *delay_off) { bool blinking = false; - int err = 0; - - if (index > 1) - return -EINVAL; + int err; - if (delay_on && delay_off && (*delay_on > 0) && (*delay_off > 0)) { - blinking = true; - *delay_on = 50; - *delay_off = 50; - } + err = mtk_phy_led_num_dly_cfg(index, delay_on, delay_off, &blinking); + if (err < 0) + return err; - err = mt798x_phy_hw_led_blink_set(phydev, index, blinking); + err = mtk_phy_hw_led_blink_set(phydev, index, blinking); if (err) return err; - return mt798x_phy_hw_led_on_set(phydev, index, false); + return mtk_phy_hw_led_on_set(phydev, index, MTK_GPHY_LED_ON_MASK, + false); } static int mt798x_phy_led_brightness_set(struct phy_device *phydev, @@ -1249,11 +1145,12 @@ static int mt798x_phy_led_brightness_set(struct phy_device *phydev, { int err; - err = mt798x_phy_hw_led_blink_set(phydev, index, false); + err = mtk_phy_hw_led_blink_set(phydev, index, false); if (err) return err; - return mt798x_phy_hw_led_on_set(phydev, index, (value != LED_OFF)); + return mtk_phy_hw_led_on_set(phydev, index, MTK_GPHY_LED_ON_MASK, + (value != LED_OFF)); } static const unsigned long supported_triggers = @@ -1269,155 +1166,26 @@ static const unsigned long supported_triggers = static int mt798x_phy_led_hw_is_supported(struct phy_device *phydev, u8 index, unsigned long rules) { - if (index > 1) - return -EINVAL; - - /* All combinations of the supported triggers are allowed */ - if (rules & ~supported_triggers) - return -EOPNOTSUPP; - - return 0; -}; + return mtk_phy_led_hw_is_supported(phydev, index, rules, + supported_triggers); +} static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index, unsigned long *rules) { - unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + - (index ? 16 : 0); - unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0); - unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0); - struct mtk_socphy_priv *priv = phydev->priv; - int on, blink; - - if (index > 1) - return -EINVAL; - - on = phy_read_mmd(phydev, MDIO_MMD_VEND2, - index ? MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL); - - if (on < 0) - return -EIO; - - blink = phy_read_mmd(phydev, MDIO_MMD_VEND2, - index ? 
MTK_PHY_LED1_BLINK_CTRL : - MTK_PHY_LED0_BLINK_CTRL); - if (blink < 0) - return -EIO; - - if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | - MTK_PHY_LED_ON_HDX | MTK_PHY_LED_ON_LINKDOWN)) || - (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX))) - set_bit(bit_netdev, &priv->led_state); - else - clear_bit(bit_netdev, &priv->led_state); - - if (on & MTK_PHY_LED_ON_FORCE_ON) - set_bit(bit_on, &priv->led_state); - else - clear_bit(bit_on, &priv->led_state); - - if (blink & MTK_PHY_LED_BLINK_FORCE_BLINK) - set_bit(bit_blink, &priv->led_state); - else - clear_bit(bit_blink, &priv->led_state); - - if (!rules) - return 0; - - if (on & MTK_PHY_LED_ON_LINK) - *rules |= BIT(TRIGGER_NETDEV_LINK); - - if (on & MTK_PHY_LED_ON_LINK10) - *rules |= BIT(TRIGGER_NETDEV_LINK_10); - - if (on & MTK_PHY_LED_ON_LINK100) - *rules |= BIT(TRIGGER_NETDEV_LINK_100); - - if (on & MTK_PHY_LED_ON_LINK1000) - *rules |= BIT(TRIGGER_NETDEV_LINK_1000); - - if (on & MTK_PHY_LED_ON_FDX) - *rules |= BIT(TRIGGER_NETDEV_FULL_DUPLEX); - - if (on & MTK_PHY_LED_ON_HDX) - *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX); - - if (blink & MTK_PHY_LED_BLINK_RX) - *rules |= BIT(TRIGGER_NETDEV_RX); - - if (blink & MTK_PHY_LED_BLINK_TX) - *rules |= BIT(TRIGGER_NETDEV_TX); - - return 0; + return mtk_phy_led_hw_ctrl_get(phydev, index, rules, + MTK_GPHY_LED_ON_SET, + MTK_GPHY_LED_RX_BLINK_SET, + MTK_GPHY_LED_TX_BLINK_SET); }; static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index, unsigned long rules) { - unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0); - struct mtk_socphy_priv *priv = phydev->priv; - u16 on = 0, blink = 0; - int ret; - - if (index > 1) - return -EINVAL; - - if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX)) - on |= MTK_PHY_LED_ON_FDX; - - if (rules & BIT(TRIGGER_NETDEV_HALF_DUPLEX)) - on |= MTK_PHY_LED_ON_HDX; - - if (rules & (BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK))) - on |= MTK_PHY_LED_ON_LINK10; - - if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK))) - on |= MTK_PHY_LED_ON_LINK100; - - if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK))) - on |= MTK_PHY_LED_ON_LINK1000; - - if (rules & BIT(TRIGGER_NETDEV_RX)) { - blink |= (on & MTK_PHY_LED_ON_LINK) ? - (((on & MTK_PHY_LED_ON_LINK10) ? - MTK_PHY_LED_BLINK_10RX : 0) | - ((on & MTK_PHY_LED_ON_LINK100) ? - MTK_PHY_LED_BLINK_100RX : 0) | - ((on & MTK_PHY_LED_ON_LINK1000) ? - MTK_PHY_LED_BLINK_1000RX : 0)) : - MTK_PHY_LED_BLINK_RX; - } - - if (rules & BIT(TRIGGER_NETDEV_TX)) { - blink |= (on & MTK_PHY_LED_ON_LINK) ? - (((on & MTK_PHY_LED_ON_LINK10) ? - MTK_PHY_LED_BLINK_10TX : 0) | - ((on & MTK_PHY_LED_ON_LINK100) ? - MTK_PHY_LED_BLINK_100TX : 0) | - ((on & MTK_PHY_LED_ON_LINK1000) ? - MTK_PHY_LED_BLINK_1000TX : 0)) : - MTK_PHY_LED_BLINK_TX; - } - - if (blink || on) - set_bit(bit_netdev, &priv->led_state); - else - clear_bit(bit_netdev, &priv->led_state); - - ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ? - MTK_PHY_LED1_ON_CTRL : - MTK_PHY_LED0_ON_CTRL, - MTK_PHY_LED_ON_FDX | - MTK_PHY_LED_ON_HDX | - MTK_PHY_LED_ON_LINK, - on); - - if (ret) - return ret; - - return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ? 
- MTK_PHY_LED1_BLINK_CTRL : - MTK_PHY_LED0_BLINK_CTRL, blink); + return mtk_phy_led_hw_ctrl_set(phydev, index, rules, + MTK_GPHY_LED_ON_SET, + MTK_GPHY_LED_RX_BLINK_SET, + MTK_GPHY_LED_TX_BLINK_SET); }; static bool mt7988_phy_led_get_polarity(struct phy_device *phydev, int led_num) @@ -1492,14 +1260,6 @@ static int mt7988_phy_probe_shared(struct phy_device *phydev) return 0; } -static void mt798x_phy_leds_state_init(struct phy_device *phydev) -{ - int i; - - for (i = 0; i < 2; ++i) - mt798x_phy_led_hw_control_get(phydev, i, NULL); -} - static int mt7988_phy_probe(struct phy_device *phydev) { struct mtk_socphy_shared *shared; @@ -1525,7 +1285,7 @@ static int mt7988_phy_probe(struct phy_device *phydev) phydev->priv = priv; - mt798x_phy_leds_state_init(phydev); + mtk_phy_leds_state_init(phydev); err = mt7988_phy_fix_leds_polarities(phydev); if (err) @@ -1552,7 +1312,7 @@ static int mt7981_phy_probe(struct phy_device *phydev) phydev->priv = priv; - mt798x_phy_leds_state_init(phydev); + mtk_phy_leds_state_init(phydev); return mt798x_phy_calibration(phydev); } @@ -1567,8 +1327,8 @@ static struct phy_driver mtk_socphy_driver[] = { .probe = mt7981_phy_probe, .suspend = genphy_suspend, .resume = genphy_resume, - .read_page = mtk_socphy_read_page, - .write_page = mtk_socphy_write_page, + .read_page = mtk_phy_read_page, + .write_page = mtk_phy_write_page, .led_blink_set = mt798x_phy_led_blink_set, .led_brightness_set = mt798x_phy_led_brightness_set, .led_hw_is_supported = mt798x_phy_led_hw_is_supported, @@ -1584,8 +1344,8 @@ static struct phy_driver mtk_socphy_driver[] = { .probe = mt7988_phy_probe, .suspend = genphy_suspend, .resume = genphy_resume, - .read_page = mtk_socphy_read_page, - .write_page = mtk_socphy_write_page, + .read_page = mtk_phy_read_page, + .write_page = mtk_phy_write_page, .led_blink_set = mt798x_phy_led_blink_set, .led_brightness_set = mt798x_phy_led_brightness_set, .led_hw_is_supported = mt798x_phy_led_hw_is_supported, diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek/mtk-ge.c index 54ea64a37ab3..ed2617bc20f4 100644 --- a/drivers/net/phy/mediatek-ge.c +++ b/drivers/net/phy/mediatek/mtk-ge.c @@ -3,6 +3,11 @@ #include <linux/module.h> #include <linux/phy.h> +#include "mtk.h" + +#define MTK_GPHY_ID_MT7530 0x03a29412 +#define MTK_GPHY_ID_MT7531 0x03a29441 + #define MTK_EXT_PAGE_ACCESS 0x1f #define MTK_PHY_PAGE_STANDARD 0x0000 #define MTK_PHY_PAGE_EXTENDED 0x0001 @@ -11,16 +16,6 @@ #define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30 #define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5 -static int mtk_gephy_read_page(struct phy_device *phydev) -{ - return __phy_read(phydev, MTK_EXT_PAGE_ACCESS); -} - -static int mtk_gephy_write_page(struct phy_device *phydev, int page) -{ - return __phy_write(phydev, MTK_EXT_PAGE_ACCESS, page); -} - static void mtk_gephy_config_init(struct phy_device *phydev) { /* Enable HW auto downshift */ @@ -67,7 +62,7 @@ static int mt7531_phy_config_init(struct phy_device *phydev) static struct phy_driver mtk_gephy_driver[] = { { - PHY_ID_MATCH_EXACT(0x03a29412), + PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7530), .name = "MediaTek MT7530 PHY", .config_init = mt7530_phy_config_init, /* Interrupts are handled by the switch, not the PHY @@ -77,11 +72,11 @@ static struct phy_driver mtk_gephy_driver[] = { .handle_interrupt = genphy_handle_interrupt_no_ack, .suspend = genphy_suspend, .resume = genphy_resume, - .read_page = mtk_gephy_read_page, - .write_page = mtk_gephy_write_page, + .read_page = mtk_phy_read_page, + .write_page = mtk_phy_write_page, }, { - 
PHY_ID_MATCH_EXACT(0x03a29441), + PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7531), .name = "MediaTek MT7531 PHY", .config_init = mt7531_phy_config_init, /* Interrupts are handled by the switch, not the PHY @@ -91,16 +86,16 @@ static struct phy_driver mtk_gephy_driver[] = { .handle_interrupt = genphy_handle_interrupt_no_ack, .suspend = genphy_suspend, .resume = genphy_resume, - .read_page = mtk_gephy_read_page, - .write_page = mtk_gephy_write_page, + .read_page = mtk_phy_read_page, + .write_page = mtk_phy_write_page, }, }; module_phy_driver(mtk_gephy_driver); static struct mdio_device_id __maybe_unused mtk_gephy_tbl[] = { - { PHY_ID_MATCH_EXACT(0x03a29441) }, - { PHY_ID_MATCH_EXACT(0x03a29412) }, + { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7530) }, + { PHY_ID_MATCH_EXACT(MTK_GPHY_ID_MT7531) }, { } }; diff --git a/drivers/net/phy/mediatek/mtk-phy-lib.c b/drivers/net/phy/mediatek/mtk-phy-lib.c new file mode 100644 index 000000000000..98a09d670e9c --- /dev/null +++ b/drivers/net/phy/mediatek/mtk-phy-lib.c @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/phy.h> +#include <linux/module.h> + +#include <linux/netdevice.h> + +#include "mtk.h" + +int mtk_phy_read_page(struct phy_device *phydev) +{ + return __phy_read(phydev, MTK_EXT_PAGE_ACCESS); +} +EXPORT_SYMBOL_GPL(mtk_phy_read_page); + +int mtk_phy_write_page(struct phy_device *phydev, int page) +{ + return __phy_write(phydev, MTK_EXT_PAGE_ACCESS, page); +} +EXPORT_SYMBOL_GPL(mtk_phy_write_page); + +int mtk_phy_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules, + unsigned long supported_triggers) +{ + if (index > 1) + return -EINVAL; + + /* All combinations of the supported triggers are allowed */ + if (rules & ~supported_triggers) + return -EOPNOTSUPP; + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_phy_led_hw_is_supported); + +int mtk_phy_led_hw_ctrl_get(struct phy_device *phydev, u8 index, + unsigned long *rules, u16 on_set, + u16 rx_blink_set, u16 tx_blink_set) +{ + unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + + (index ? 16 : 0); + unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0); + unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0); + struct mtk_socphy_priv *priv = phydev->priv; + int on, blink; + + if (index > 1) + return -EINVAL; + + on = phy_read_mmd(phydev, MDIO_MMD_VEND2, + index ? MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL); + + if (on < 0) + return -EIO; + + blink = phy_read_mmd(phydev, MDIO_MMD_VEND2, + index ? 
MTK_PHY_LED1_BLINK_CTRL : + MTK_PHY_LED0_BLINK_CTRL); + if (blink < 0) + return -EIO; + + if ((on & (on_set | MTK_PHY_LED_ON_FDX | + MTK_PHY_LED_ON_HDX | MTK_PHY_LED_ON_LINKDOWN)) || + (blink & (rx_blink_set | tx_blink_set))) + set_bit(bit_netdev, &priv->led_state); + else + clear_bit(bit_netdev, &priv->led_state); + + if (on & MTK_PHY_LED_ON_FORCE_ON) + set_bit(bit_on, &priv->led_state); + else + clear_bit(bit_on, &priv->led_state); + + if (blink & MTK_PHY_LED_BLINK_FORCE_BLINK) + set_bit(bit_blink, &priv->led_state); + else + clear_bit(bit_blink, &priv->led_state); + + if (!rules) + return 0; + + if (on & on_set) + *rules |= BIT(TRIGGER_NETDEV_LINK); + + if (on & MTK_PHY_LED_ON_LINK10) + *rules |= BIT(TRIGGER_NETDEV_LINK_10); + + if (on & MTK_PHY_LED_ON_LINK100) + *rules |= BIT(TRIGGER_NETDEV_LINK_100); + + if (on & MTK_PHY_LED_ON_LINK1000) + *rules |= BIT(TRIGGER_NETDEV_LINK_1000); + + if (on & MTK_PHY_LED_ON_LINK2500) + *rules |= BIT(TRIGGER_NETDEV_LINK_2500); + + if (on & MTK_PHY_LED_ON_FDX) + *rules |= BIT(TRIGGER_NETDEV_FULL_DUPLEX); + + if (on & MTK_PHY_LED_ON_HDX) + *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX); + + if (blink & rx_blink_set) + *rules |= BIT(TRIGGER_NETDEV_RX); + + if (blink & tx_blink_set) + *rules |= BIT(TRIGGER_NETDEV_TX); + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_phy_led_hw_ctrl_get); + +int mtk_phy_led_hw_ctrl_set(struct phy_device *phydev, u8 index, + unsigned long rules, u16 on_set, + u16 rx_blink_set, u16 tx_blink_set) +{ + unsigned int bit_netdev = MTK_PHY_LED_STATE_NETDEV + (index ? 16 : 0); + struct mtk_socphy_priv *priv = phydev->priv; + u16 on = 0, blink = 0; + int ret; + + if (index > 1) + return -EINVAL; + + if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX)) + on |= MTK_PHY_LED_ON_FDX; + + if (rules & BIT(TRIGGER_NETDEV_HALF_DUPLEX)) + on |= MTK_PHY_LED_ON_HDX; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK))) + on |= MTK_PHY_LED_ON_LINK10; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK))) + on |= MTK_PHY_LED_ON_LINK100; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK))) + on |= MTK_PHY_LED_ON_LINK1000; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK))) + on |= MTK_PHY_LED_ON_LINK2500; + + if (rules & BIT(TRIGGER_NETDEV_RX)) { + if (on & on_set) { + if (on & MTK_PHY_LED_ON_LINK10) + blink |= MTK_PHY_LED_BLINK_10RX; + if (on & MTK_PHY_LED_ON_LINK100) + blink |= MTK_PHY_LED_BLINK_100RX; + if (on & MTK_PHY_LED_ON_LINK1000) + blink |= MTK_PHY_LED_BLINK_1000RX; + if (on & MTK_PHY_LED_ON_LINK2500) + blink |= MTK_PHY_LED_BLINK_2500RX; + } else { + blink |= rx_blink_set; + } + } + + if (rules & BIT(TRIGGER_NETDEV_TX)) { + if (on & on_set) { + if (on & MTK_PHY_LED_ON_LINK10) + blink |= MTK_PHY_LED_BLINK_10TX; + if (on & MTK_PHY_LED_ON_LINK100) + blink |= MTK_PHY_LED_BLINK_100TX; + if (on & MTK_PHY_LED_ON_LINK1000) + blink |= MTK_PHY_LED_BLINK_1000TX; + if (on & MTK_PHY_LED_ON_LINK2500) + blink |= MTK_PHY_LED_BLINK_2500TX; + } else { + blink |= tx_blink_set; + } + } + + if (blink || on) + set_bit(bit_netdev, &priv->led_state); + else + clear_bit(bit_netdev, &priv->led_state); + + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ? + MTK_PHY_LED1_ON_CTRL : MTK_PHY_LED0_ON_CTRL, + MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX | on_set, + on); + + if (ret) + return ret; + + return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ? 
+ MTK_PHY_LED1_BLINK_CTRL : + MTK_PHY_LED0_BLINK_CTRL, blink); +} +EXPORT_SYMBOL_GPL(mtk_phy_led_hw_ctrl_set); + +int mtk_phy_led_num_dly_cfg(u8 index, unsigned long *delay_on, + unsigned long *delay_off, bool *blinking) +{ + if (index > 1) + return -EINVAL; + + if (delay_on && delay_off && (*delay_on > 0) && (*delay_off > 0)) { + *blinking = true; + *delay_on = 50; + *delay_off = 50; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_phy_led_num_dly_cfg); + +int mtk_phy_hw_led_on_set(struct phy_device *phydev, u8 index, + u16 led_on_mask, bool on) +{ + unsigned int bit_on = MTK_PHY_LED_STATE_FORCE_ON + (index ? 16 : 0); + struct mtk_socphy_priv *priv = phydev->priv; + bool changed; + + if (on) + changed = !test_and_set_bit(bit_on, &priv->led_state); + else + changed = !!test_and_clear_bit(bit_on, &priv->led_state); + + changed |= !!test_and_clear_bit(MTK_PHY_LED_STATE_NETDEV + + (index ? 16 : 0), &priv->led_state); + if (changed) + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, index ? + MTK_PHY_LED1_ON_CTRL : + MTK_PHY_LED0_ON_CTRL, + led_on_mask, + on ? MTK_PHY_LED_ON_FORCE_ON : 0); + else + return 0; +} +EXPORT_SYMBOL_GPL(mtk_phy_hw_led_on_set); + +int mtk_phy_hw_led_blink_set(struct phy_device *phydev, u8 index, bool blinking) +{ + unsigned int bit_blink = MTK_PHY_LED_STATE_FORCE_BLINK + + (index ? 16 : 0); + struct mtk_socphy_priv *priv = phydev->priv; + bool changed; + + if (blinking) + changed = !test_and_set_bit(bit_blink, &priv->led_state); + else + changed = !!test_and_clear_bit(bit_blink, &priv->led_state); + + changed |= !!test_bit(MTK_PHY_LED_STATE_NETDEV + + (index ? 16 : 0), &priv->led_state); + if (changed) + return phy_write_mmd(phydev, MDIO_MMD_VEND2, index ? + MTK_PHY_LED1_BLINK_CTRL : + MTK_PHY_LED0_BLINK_CTRL, + blinking ? + MTK_PHY_LED_BLINK_FORCE_BLINK : 0); + else + return 0; +} +EXPORT_SYMBOL_GPL(mtk_phy_hw_led_blink_set); + +void mtk_phy_leds_state_init(struct phy_device *phydev) +{ + int i; + + for (i = 0; i < 2; ++i) + phydev->drv->led_hw_control_get(phydev, i, NULL); +} +EXPORT_SYMBOL_GPL(mtk_phy_leds_state_init); + +MODULE_DESCRIPTION("MediaTek Ethernet PHY driver common"); +MODULE_AUTHOR("Sky Huang <SkyLake.Huang@mediatek.com>"); +MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mediatek/mtk.h b/drivers/net/phy/mediatek/mtk.h new file mode 100644 index 000000000000..63d9fe179b8f --- /dev/null +++ b/drivers/net/phy/mediatek/mtk.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Common definition for Mediatek Ethernet PHYs + * Author: SkyLake Huang <SkyLake.Huang@mediatek.com> + * Copyright (c) 2024 MediaTek Inc. 
+ */ + +#ifndef _MTK_EPHY_H_ +#define _MTK_EPHY_H_ + +#define MTK_EXT_PAGE_ACCESS 0x1f + +/* Registers on MDIO_MMD_VEND2 */ +#define MTK_PHY_LED0_ON_CTRL 0x24 +#define MTK_PHY_LED1_ON_CTRL 0x26 +#define MTK_GPHY_LED_ON_MASK GENMASK(6, 0) +#define MTK_2P5GPHY_LED_ON_MASK GENMASK(7, 0) +#define MTK_PHY_LED_ON_LINK1000 BIT(0) +#define MTK_PHY_LED_ON_LINK100 BIT(1) +#define MTK_PHY_LED_ON_LINK10 BIT(2) +#define MTK_PHY_LED_ON_LINKDOWN BIT(3) +#define MTK_PHY_LED_ON_FDX BIT(4) /* Full duplex */ +#define MTK_PHY_LED_ON_HDX BIT(5) /* Half duplex */ +#define MTK_PHY_LED_ON_FORCE_ON BIT(6) +#define MTK_PHY_LED_ON_LINK2500 BIT(7) +#define MTK_PHY_LED_ON_POLARITY BIT(14) +#define MTK_PHY_LED_ON_ENABLE BIT(15) + +#define MTK_PHY_LED0_BLINK_CTRL 0x25 +#define MTK_PHY_LED1_BLINK_CTRL 0x27 +#define MTK_PHY_LED_BLINK_1000TX BIT(0) +#define MTK_PHY_LED_BLINK_1000RX BIT(1) +#define MTK_PHY_LED_BLINK_100TX BIT(2) +#define MTK_PHY_LED_BLINK_100RX BIT(3) +#define MTK_PHY_LED_BLINK_10TX BIT(4) +#define MTK_PHY_LED_BLINK_10RX BIT(5) +#define MTK_PHY_LED_BLINK_COLLISION BIT(6) +#define MTK_PHY_LED_BLINK_RX_CRC_ERR BIT(7) +#define MTK_PHY_LED_BLINK_RX_IDLE_ERR BIT(8) +#define MTK_PHY_LED_BLINK_FORCE_BLINK BIT(9) +#define MTK_PHY_LED_BLINK_2500TX BIT(10) +#define MTK_PHY_LED_BLINK_2500RX BIT(11) + +#define MTK_GPHY_LED_ON_SET (MTK_PHY_LED_ON_LINK1000 | \ + MTK_PHY_LED_ON_LINK100 | \ + MTK_PHY_LED_ON_LINK10) +#define MTK_GPHY_LED_RX_BLINK_SET (MTK_PHY_LED_BLINK_1000RX | \ + MTK_PHY_LED_BLINK_100RX | \ + MTK_PHY_LED_BLINK_10RX) +#define MTK_GPHY_LED_TX_BLINK_SET (MTK_PHY_LED_BLINK_1000RX | \ + MTK_PHY_LED_BLINK_100RX | \ + MTK_PHY_LED_BLINK_10RX) + +#define MTK_2P5GPHY_LED_ON_SET (MTK_PHY_LED_ON_LINK2500 | \ + MTK_GPHY_LED_ON_SET) +#define MTK_2P5GPHY_LED_RX_BLINK_SET (MTK_PHY_LED_BLINK_2500RX | \ + MTK_GPHY_LED_RX_BLINK_SET) +#define MTK_2P5GPHY_LED_TX_BLINK_SET (MTK_PHY_LED_BLINK_2500RX | \ + MTK_GPHY_LED_TX_BLINK_SET) + +#define MTK_PHY_LED_STATE_FORCE_ON 0 +#define MTK_PHY_LED_STATE_FORCE_BLINK 1 +#define MTK_PHY_LED_STATE_NETDEV 2 + +struct mtk_socphy_priv { + unsigned long led_state; +}; + +int mtk_phy_read_page(struct phy_device *phydev); +int mtk_phy_write_page(struct phy_device *phydev, int page); + +int mtk_phy_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules, + unsigned long supported_triggers); +int mtk_phy_led_hw_ctrl_set(struct phy_device *phydev, u8 index, + unsigned long rules, u16 on_set, + u16 rx_blink_set, u16 tx_blink_set); +int mtk_phy_led_hw_ctrl_get(struct phy_device *phydev, u8 index, + unsigned long *rules, u16 on_set, + u16 rx_blink_set, u16 tx_blink_set); +int mtk_phy_led_num_dly_cfg(u8 index, unsigned long *delay_on, + unsigned long *delay_off, bool *blinking); +int mtk_phy_hw_led_on_set(struct phy_device *phydev, u8 index, + u16 led_on_mask, bool on); +int mtk_phy_hw_led_blink_set(struct phy_device *phydev, u8 index, + bool blinking); +void mtk_phy_leds_state_init(struct phy_device *phydev); + +#endif /* _MTK_EPHY_H_ */ diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 43c82a87bc3a..3ef508840674 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -2004,7 +2004,7 @@ static int ksz9477_config_init(struct phy_device *phydev) * in this switch shall be regarded as broken. 
*/ if (phydev->dev_flags & MICREL_NO_EEE) - phydev->eee_broken_modes = -1; + linkmode_fill(phydev->eee_broken_modes); return kszphy_config_init(phydev); } diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index c1b3576c307f..da5c15310d8c 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -683,15 +683,13 @@ EXPORT_SYMBOL_GPL(genphy_c45_read_mdix); static int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); int val, changed = 0; - if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP1_FEATURES)) { - val = linkmode_to_mii_eee_cap1_t(adv); + linkmode_andnot(tmp, adv, phydev->eee_broken_modes); - /* In eee_broken_modes are stored MDIO_AN_EEE_ADV specific raw - * register values. - */ - val &= ~phydev->eee_broken_modes; + if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP1_FEATURES)) { + val = linkmode_to_mii_eee_cap1_t(tmp); /* IEEE 802.3-2018 45.2.7.13 EEE advertisement 1 * (Register 7.60) @@ -709,7 +707,7 @@ static int genphy_c45_write_eee_adv(struct phy_device *phydev, } if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) { - val = linkmode_to_mii_eee_cap2_t(adv); + val = linkmode_to_mii_eee_cap2_t(tmp); /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2 * (Register 7.62) @@ -1523,20 +1521,17 @@ EXPORT_SYMBOL(genphy_c45_eee_is_active); int genphy_c45_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {}; - __ETHTOOL_DECLARE_LINK_MODE_MASK(lp) = {}; bool is_enabled; int ret; - ret = genphy_c45_eee_is_active(phydev, adv, lp, &is_enabled); + ret = genphy_c45_eee_is_active(phydev, data->advertised, + data->lp_advertised, &is_enabled); if (ret < 0) return ret; data->eee_enabled = is_enabled; data->eee_active = ret; linkmode_copy(data->supported, phydev->supported_eee); - linkmode_copy(data->advertised, adv); - linkmode_copy(data->lp_advertised, lp); return 0; } diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 4e8db12d6092..6bf3ec985f3d 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -388,28 +388,25 @@ void of_set_phy_supported(struct phy_device *phydev) void of_set_phy_eee_broken(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; - u32 broken = 0; + unsigned long *modes = phydev->eee_broken_modes; - if (!IS_ENABLED(CONFIG_OF_MDIO)) + if (!IS_ENABLED(CONFIG_OF_MDIO) || !node) return; - if (!node) - return; + linkmode_zero(modes); if (of_property_read_bool(node, "eee-broken-100tx")) - broken |= MDIO_EEE_100TX; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, modes); if (of_property_read_bool(node, "eee-broken-1000t")) - broken |= MDIO_EEE_1000T; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, modes); if (of_property_read_bool(node, "eee-broken-10gt")) - broken |= MDIO_EEE_10GT; + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, modes); if (of_property_read_bool(node, "eee-broken-1000kx")) - broken |= MDIO_EEE_1000KX; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, modes); if (of_property_read_bool(node, "eee-broken-10gkx4")) - broken |= MDIO_EEE_10GKX4; + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, modes); if (of_property_read_bool(node, "eee-broken-10gkr")) - broken |= MDIO_EEE_10GKR; - - phydev->eee_broken_modes = broken; + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, modes); } /** diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 
b1e828a4286d..30a654e98352 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -78,7 +78,7 @@ struct phylink { unsigned int pcs_neg_mode; unsigned int pcs_state; - bool mac_link_dropped; + bool link_failed; struct sfp_bus *sfp_bus; bool sfp_may_have_phy; @@ -1458,9 +1458,9 @@ static void phylink_resolve(struct work_struct *w) cur_link_state = pl->old_link_state; if (pl->phylink_disable_state) { - pl->mac_link_dropped = false; + pl->link_failed = false; link_state.link = false; - } else if (pl->mac_link_dropped) { + } else if (pl->link_failed) { link_state.link = false; retrigger = true; } else if (pl->cur_link_an_mode == MLO_AN_FIXED) { @@ -1545,7 +1545,7 @@ static void phylink_resolve(struct work_struct *w) phylink_link_up(pl, link_state); } if (!link_state.link && retrigger) { - pl->mac_link_dropped = false; + pl->link_failed = false; queue_work(system_power_efficient_wq, &pl->resolve); } mutex_unlock(&pl->state_mutex); @@ -1801,6 +1801,8 @@ static void phylink_phy_change(struct phy_device *phydev, bool up) pl->phy_state.pause |= MLO_PAUSE_RX; pl->phy_state.interface = phydev->interface; pl->phy_state.link = up; + if (!up) + pl->link_failed = true; mutex_unlock(&pl->state_mutex); phylink_run_resolve(pl); @@ -2124,7 +2126,7 @@ EXPORT_SYMBOL_GPL(phylink_disconnect_phy); static void phylink_link_changed(struct phylink *pl, bool up, const char *what) { if (!up) - pl->mac_link_dropped = true; + pl->link_failed = true; phylink_run_resolve(pl); phylink_dbg(pl, "%s link %s\n", what, up ? "up" : "down"); } @@ -2779,7 +2781,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, * link will cycle. */ if (manual_changed) { - pl->mac_link_dropped = true; + pl->link_failed = true; phylink_run_resolve(pl); } diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index f0441b3d7dcb..db9f9ebcb62d 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -837,12 +837,12 @@ static void ath10k_ahb_remove(struct platform_device *pdev) } static struct platform_driver ath10k_ahb_driver = { - .driver = { - .name = "ath10k_ahb", + .driver = { + .name = "ath10k_ahb", .of_match_table = ath10k_ahb_of_match, }, - .probe = ath10k_ahb_probe, - .remove_new = ath10k_ahb_remove, + .probe = ath10k_ahb_probe, + .remove = ath10k_ahb_remove, }; int ath10k_ahb_init(void) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 41ab83c3d3f7..c61b95a928da 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -6369,7 +6369,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = (void *)vif->drv_priv; struct cfg80211_scan_request *req = &hw_req->req; - struct wmi_start_scan_arg arg; + struct wmi_start_scan_arg *arg = NULL; int ret = 0; int i; u32 scan_timeout; @@ -6402,56 +6402,61 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, if (ret) goto exit; - memset(&arg, 0, sizeof(arg)); - ath10k_wmi_start_scan_init(ar, &arg); - arg.vdev_id = arvif->vdev_id; - arg.scan_id = ATH10K_SCAN_ID; + arg = kzalloc(sizeof(*arg), GFP_KERNEL); + if (!arg) { + ret = -ENOMEM; + goto exit; + } + + ath10k_wmi_start_scan_init(ar, arg); + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH10K_SCAN_ID; if (req->ie_len) { - arg.ie_len = req->ie_len; - memcpy(arg.ie, req->ie, arg.ie_len); + arg->ie_len = req->ie_len; + memcpy(arg->ie, req->ie, arg->ie_len); } if (req->n_ssids) { - arg.n_ssids = req->n_ssids; - for 
(i = 0; i < arg.n_ssids; i++) { - arg.ssids[i].len = req->ssids[i].ssid_len; - arg.ssids[i].ssid = req->ssids[i].ssid; + arg->n_ssids = req->n_ssids; + for (i = 0; i < arg->n_ssids; i++) { + arg->ssids[i].len = req->ssids[i].ssid_len; + arg->ssids[i].ssid = req->ssids[i].ssid; } } else { - arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; + arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; } if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { - arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; - ether_addr_copy(arg.mac_addr.addr, req->mac_addr); - ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); + arg->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; + ether_addr_copy(arg->mac_addr.addr, req->mac_addr); + ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask); } if (req->n_channels) { - arg.n_channels = req->n_channels; - for (i = 0; i < arg.n_channels; i++) - arg.channels[i] = req->channels[i]->center_freq; + arg->n_channels = req->n_channels; + for (i = 0; i < arg->n_channels; i++) + arg->channels[i] = req->channels[i]->center_freq; } /* if duration is set, default dwell times will be overwritten */ if (req->duration) { - arg.dwell_time_active = req->duration; - arg.dwell_time_passive = req->duration; - arg.burst_duration_ms = req->duration; + arg->dwell_time_active = req->duration; + arg->dwell_time_passive = req->duration; + arg->burst_duration_ms = req->duration; - scan_timeout = min_t(u32, arg.max_rest_time * - (arg.n_channels - 1) + (req->duration + + scan_timeout = min_t(u32, arg->max_rest_time * + (arg->n_channels - 1) + (req->duration + ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * - arg.n_channels, arg.max_scan_time); + arg->n_channels, arg->max_scan_time); } else { - scan_timeout = arg.max_scan_time; + scan_timeout = arg->max_scan_time; } /* Add a 200ms margin to account for event/command processing */ scan_timeout += 200; - ret = ath10k_start_scan(ar, &arg); + ret = ath10k_start_scan(ar, arg); if (ret) { ath10k_warn(ar, "failed to start hw scan: %d\n", ret); spin_lock_bh(&ar->data_lock); @@ -6463,6 +6468,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, msecs_to_jiffies(scan_timeout)); exit: + kfree(arg); + mutex_unlock(&ar->conf_mutex); return ret; } @@ -7899,7 +7906,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, { struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = (void *)vif->drv_priv; - struct wmi_start_scan_arg arg; + struct wmi_start_scan_arg *arg = NULL; int ret = 0; u32 scan_time_msec; @@ -7936,20 +7943,25 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; - memset(&arg, 0, sizeof(arg)); - ath10k_wmi_start_scan_init(ar, &arg); - arg.vdev_id = arvif->vdev_id; - arg.scan_id = ATH10K_SCAN_ID; - arg.n_channels = 1; - arg.channels[0] = chan->center_freq; - arg.dwell_time_active = scan_time_msec; - arg.dwell_time_passive = scan_time_msec; - arg.max_scan_time = scan_time_msec; - arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; - arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; - arg.burst_duration_ms = duration; - - ret = ath10k_start_scan(ar, &arg); + arg = kzalloc(sizeof(*arg), GFP_KERNEL); + if (!arg) { + ret = -ENOMEM; + goto exit; + } + + ath10k_wmi_start_scan_init(ar, arg); + arg->vdev_id = arvif->vdev_id; + arg->scan_id = ATH10K_SCAN_ID; + arg->n_channels = 1; + arg->channels[0] = chan->center_freq; + arg->dwell_time_active = scan_time_msec; + arg->dwell_time_passive = scan_time_msec; + arg->max_scan_time = scan_time_msec; + 
arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; + arg->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; + arg->burst_duration_ms = duration; + + ret = ath10k_start_scan(ar, arg); if (ret) { ath10k_warn(ar, "failed to start roc scan: %d\n", ret); spin_lock_bh(&ar->data_lock); @@ -7975,6 +7987,8 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw, ret = 0; exit: + kfree(arg); + mutex_unlock(&ar->conf_mutex); return ret; } @@ -9122,7 +9136,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[ {6, {2633, 2925}, {1215, 1350}, {585, 650} }, {7, {2925, 3250}, {1350, 1500}, {650, 722} }, {8, {3510, 3900}, {1620, 1800}, {780, 867} }, - {9, {3900, 4333}, {1800, 2000}, {780, 867} } + {9, {3900, 4333}, {1800, 2000}, {865, 960} } }; /*MCS parameters with Nss = 2 */ @@ -9137,7 +9151,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[ {6, {5265, 5850}, {2430, 2700}, {1170, 1300} }, {7, {5850, 6500}, {2700, 3000}, {1300, 1444} }, {8, {7020, 7800}, {3240, 3600}, {1560, 1733} }, - {9, {7800, 8667}, {3600, 4000}, {1560, 1733} } + {9, {7800, 8667}, {3600, 4000}, {1730, 1920} } }; static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs, diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 08a6f36a6be9..6805357ee29e 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -3,7 +3,7 @@ * Copyright (c) 2004-2011 Atheros Communications Inc. * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com> - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include <linux/module.h> @@ -2648,9 +2648,9 @@ static void ath10k_sdio_remove(struct sdio_func *func) netif_napi_del(&ar->napi); - ath10k_core_destroy(ar); - destroy_workqueue(ar_sdio->workqueue); + + ath10k_core_destroy(ar); } static const struct sdio_device_id ath10k_sdio_devices[] = { diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 0fe47d51013c..d436a874cd5a 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1885,11 +1885,11 @@ static void ath10k_snoc_shutdown(struct platform_device *pdev) } static struct platform_driver ath10k_snoc_driver = { - .probe = ath10k_snoc_probe, - .remove_new = ath10k_snoc_remove, + .probe = ath10k_snoc_probe, + .remove = ath10k_snoc_remove, .shutdown = ath10k_snoc_shutdown, .driver = { - .name = "ath10k_snoc", + .name = "ath10k_snoc", .of_match_table = ath10k_snoc_dt_match, }, }; diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 97b12f51ef28..f2fc04596d48 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -1000,18 +1000,18 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab) if (!ab->hw_params.fixed_fw_mem) return 0; - ret = ath11k_ahb_setup_msa_resources(ab); - if (ret) { - ath11k_err(ab, "failed to setup msa resources\n"); - return ret; - } - node = of_get_child_by_name(host_dev->of_node, "wifi-firmware"); if (!node) { ab_ahb->fw.use_tz = true; return 0; } + ret = ath11k_ahb_setup_msa_resources(ab); + if (ret) { + ath11k_err(ab, "failed to setup msa resources\n"); + return ret; + } + info.fwnode = &node->fwnode; info.parent = host_dev; info.name = node->name; @@ -1313,12 +1313,12 @@ free_resources: } static struct platform_driver ath11k_ahb_driver = { - .driver = { - .name = "ath11k", + .driver = { + .name = "ath11k", .of_match_table = ath11k_ahb_of_match, }, - .probe = ath11k_ahb_probe, - .remove_new = ath11k_ahb_remove, + .probe = ath11k_ahb_probe, + .remove = ath11k_ahb_remove, .shutdown = ath11k_ahb_shutdown, }; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index ccf4ad35fdc3..be67382c00f6 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -616,7 +616,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .supports_dynamic_smps_6ghz = false, .alloc_cacheable_memory = false, .supports_rssi_stats = true, - .fw_wmi_diag_event = false, + .fw_wmi_diag_event = true, .current_cc_support = true, .dbr_debug_support = false, .global_reset = false, diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index f02599bd1c36..61f4b6dd5380 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -1351,6 +1351,7 @@ void ath11k_hal_srng_deinit(struct ath11k_base *ab) ath11k_hal_free_cont_rdp(ab); ath11k_hal_free_cont_wrp(ab); kfree(hal->srng_config); + hal->srng_config = NULL; } EXPORT_SYMBOL(ath11k_hal_srng_deinit); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index f477afd325de..7a22483b35cd 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -2180,6 +2180,9 @@ static int ath11k_qmi_request_device_info(struct ath11k_base *ab) ab->mem = bar_addr_va; ab->mem_len = resp.bar_size; + if (!ab->hw_params.ce_remap) + ab->mem_ce = ab->mem; + return 0; out: return ret; diff --git 
a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c index 99d8ba45a75b..827085a926b2 100644 --- a/drivers/net/wireless/ath/ath11k/wow.c +++ b/drivers/net/wireless/ath/ath11k/wow.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/delay.h> @@ -155,6 +155,7 @@ static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new, u8 hdr_8023_bit_mask[ETH_HLEN] = {}; u8 hdr_80211_pattern[WOW_HDR_LEN] = {}; u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {}; + u8 bytemask[WOW_MAX_PATTERN_SIZE] = {}; int total_len = old->pkt_offset + old->pattern_len; int hdr_80211_end_offset; @@ -172,11 +173,17 @@ static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new, struct rfc1042_hdr *new_rfc_mask = (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len); int rfc_len = sizeof(*new_rfc_pattern); + int i; + + /* convert bitmask to bytemask */ + for (i = 0; i < old->pattern_len; i++) + if (old->mask[i / 8] & BIT(i % 8)) + bytemask[i] = 0xff; memcpy(hdr_8023_pattern + old->pkt_offset, old->pattern, ETH_HLEN - old->pkt_offset); memcpy(hdr_8023_bit_mask + old->pkt_offset, - old->mask, ETH_HLEN - old->pkt_offset); + bytemask, ETH_HLEN - old->pkt_offset); /* Copy destination address */ memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN); @@ -232,7 +239,7 @@ static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new, (void *)old->pattern + ETH_HLEN - old->pkt_offset, total_len - ETH_HLEN); memcpy((u8 *)new->mask + new->pattern_len, - (void *)old->mask + ETH_HLEN - old->pkt_offset, + bytemask + ETH_HLEN - old->pkt_offset, total_len - ETH_HLEN); new->pattern_len += total_len - ETH_HLEN; @@ -393,35 +400,31 @@ static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif, } for (i = 0; i < wowlan->n_patterns; i++) { - u8 bitmask[WOW_MAX_PATTERN_SIZE] = {}; u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {}; u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {}; struct cfg80211_pkt_pattern new_pattern = {}; - struct cfg80211_pkt_pattern old_pattern = patterns[i]; - int j; new_pattern.pattern = ath_pattern; new_pattern.mask = ath_bitmask; if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE) continue; - /* convert bytemask to bitmask */ - for (j = 0; j < patterns[i].pattern_len; j++) - if (patterns[i].mask[j / 8] & BIT(j % 8)) - bitmask[j] = 0xff; - old_pattern.mask = bitmask; if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode == ATH11K_HW_TXRX_NATIVE_WIFI) { if (patterns[i].pkt_offset < ETH_HLEN) { - u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {}; - - memcpy(pattern_ext, old_pattern.pattern, - old_pattern.pattern_len); - old_pattern.pattern = pattern_ext; ath11k_wow_convert_8023_to_80211(&new_pattern, - &old_pattern); + &patterns[i]); } else { - new_pattern = old_pattern; + int j; + + new_pattern = patterns[i]; + new_pattern.mask = ath_bitmask; + + /* convert bitmask to bytemask */ + for (j = 0; j < patterns[i].pattern_len; j++) + if (patterns[i].mask[j / 8] & BIT(j % 8)) + ath_bitmask[j] = 0xff; + new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN; } } diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig index f64e7c322216..52a1bb19e3da 100644 --- a/drivers/net/wireless/ath/ath12k/Kconfig +++ b/drivers/net/wireless/ath/ath12k/Kconfig @@ -42,3 +42,13 @@ config 
ATH12K_TRACING If unsure, say Y to make it easier to debug problems. But if you want optimal performance choose N. + +config ATH12K_COREDUMP + bool "ath12k coredump" + depends on ATH12K + select WANT_DEV_COREDUMP + help + Enable ath12k coredump collection + + If unsure, say Y to make it easier to debug problems. But if + dump collection not required choose N. diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile index 5a1ed20d730e..b5bb3e2599cd 100644 --- a/drivers/net/wireless/ath/ath12k/Makefile +++ b/drivers/net/wireless/ath/ath12k/Makefile @@ -27,6 +27,7 @@ ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o ath12k-$(CONFIG_ACPI) += acpi.o ath12k-$(CONFIG_ATH12K_TRACING) += trace.o ath12k-$(CONFIG_PM) += wow.o +ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h index 857bc5f9e946..1a14b9fb86b8 100644 --- a/drivers/net/wireless/ath/ath12k/ce.h +++ b/drivers/net/wireless/ath/ath12k/ce.h @@ -148,7 +148,7 @@ struct ath12k_ce_pipe { void (*send_cb)(struct ath12k_ce_pipe *pipe); void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb); - struct tasklet_struct intr_tq; + struct work_struct intr_wq; struct ath12k_ce_ring *src_ring; struct ath12k_ce_ring *dest_ring; struct ath12k_ce_ring *status_ring; diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 51252e8bc1ae..c57322221e1d 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -1004,7 +1004,7 @@ void ath12k_core_halt(struct ath12k *ar) { struct ath12k_base *ab = ar->ab; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ar->num_created_vdevs = 0; ar->allocated_vdev_map = 0; @@ -1078,6 +1078,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab) if (!ah || ah->state == ATH12K_HW_STATE_OFF) continue; + wiphy_lock(ah->hw->wiphy); mutex_lock(&ah->hw_mutex); switch (ah->state) { @@ -1086,10 +1087,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab) for (j = 0; j < ah->num_radio; j++) { ar = &ah->radio[j]; - - mutex_lock(&ar->conf_mutex); ath12k_core_halt(ar); - mutex_unlock(&ar->conf_mutex); } break; @@ -1110,6 +1108,7 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab) } mutex_unlock(&ah->hw_mutex); + wiphy_unlock(ah->hw->wiphy); } complete(&ab->driver_recovery); @@ -1188,6 +1187,7 @@ static void ath12k_core_reset(struct work_struct *work) ab->is_reset = true; atomic_set(&ab->recovery_count, 0); + ath12k_coredump_collect(ab); ath12k_core_pre_reconfigure_recovery(ab); ath12k_core_post_reconfigure_recovery(ab); @@ -1312,6 +1312,7 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, INIT_WORK(&ab->restart_work, ath12k_core_restart); INIT_WORK(&ab->reset_work, ath12k_core_reset); INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work); + INIT_WORK(&ab->dump_work, ath12k_coredump_upload); timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index 7f2e9a9b4097..3bf31ee5b9fa 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -30,6 +30,7 @@ #include "acpi.h" #include "wow.h" #include "debugfs_htt_stats.h" +#include "coredump.h" 
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -94,6 +95,14 @@ static inline enum wme_ac ath12k_tid_to_ac(u32 tid) WME_AC_VO); } +static inline u64 ath12k_le32hilo_to_u64(__le32 hi, __le32 lo) +{ + u64 hi64 = le32_to_cpu(hi); + u64 lo64 = le32_to_cpu(lo); + + return (hi64 << 32) | lo64; +} + enum ath12k_skb_flags { ATH12K_SKB_HW_80211_ENCAP = BIT(0), ATH12K_SKB_CIPHER_SET = BIT(1), @@ -220,8 +229,9 @@ struct ath12k_tx_conf { }; struct ath12k_key_conf { - bool changed; enum set_key_cmd cmd; + struct list_head list; + struct ieee80211_sta *sta; struct ieee80211_key_conf *key; }; @@ -238,10 +248,8 @@ struct ath12k_rekey_data { bool enable_offload; }; -struct ath12k_vif { +struct ath12k_link_vif { u32 vdev_id; - enum wmi_vdev_type vdev_type; - enum wmi_vdev_subtype vdev_subtype; u32 beacon_interval; u32 dtim_period; u16 ast_hash; @@ -251,13 +259,38 @@ struct ath12k_vif { u8 search_type; struct ath12k *ar; - struct ieee80211_vif *vif; int bank_id; u8 vdev_id_check_en; struct wmi_wmm_params_all_arg wmm_params; struct list_head list; + + bool is_created; + bool is_started; + bool is_up; + u8 bssid[ETH_ALEN]; + struct cfg80211_bitrate_mask bitrate_mask; + struct delayed_work connection_loss_work; + int num_legacy_stations; + int rtscts_prot_mode; + int txpower; + bool rsnie_present; + bool wpaie_present; + struct ieee80211_chanctx_conf chanctx; + u8 vdev_stats_id; + u32 punct_bitmap; + u8 link_id; + struct ath12k_vif *ahvif; + struct ath12k_rekey_data rekey_data; +}; + +struct ath12k_vif { + enum wmi_vdev_type vdev_type; + enum wmi_vdev_subtype vdev_subtype; + struct ieee80211_vif *vif; + struct ath12k_hw *ah; + union { struct { u32 uapsd; @@ -275,25 +308,16 @@ struct ath12k_vif { } ap; } u; - bool is_created; - bool is_started; - bool is_up; u32 aid; - u8 bssid[ETH_ALEN]; - struct cfg80211_bitrate_mask bitrate_mask; - struct delayed_work connection_loss_work; - int num_legacy_stations; - int rtscts_prot_mode; - int txpower; - bool rsnie_present; - bool wpaie_present; u32 key_cipher; u8 tx_encap_type; - u8 vdev_stats_id; - u32 punct_bitmap; bool ps; - struct ath12k_vif_cache *cache; - struct ath12k_rekey_data rekey_data; + + struct ath12k_link_vif deflink; + struct ath12k_link_vif __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS]; + /* indicates bitmap of link vif created in FW */ + u16 links_map; /* Must be last - ends in a flexible-array member. 
* @@ -306,7 +330,7 @@ struct ath12k_vif { struct ath12k_vif_iter { u32 vdev_id; struct ath12k *ar; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; }; #define HAL_AST_IDX_INVALID 0xFFFF @@ -441,27 +465,36 @@ struct ath12k_wbm_tx_stats { u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX]; }; -struct ath12k_sta { - struct ath12k_vif *arvif; +struct ath12k_link_sta { + struct ath12k_link_vif *arvif; + struct ath12k_sta *ahsta; /* the following are protected by ar->data_lock */ u32 changed; /* IEEE80211_RC_* */ u32 bw; u32 nss; u32 smps; - enum hal_pn_type pn_type; - struct work_struct update_wk; + struct wiphy_work update_wk; struct rate_info txrate; struct rate_info last_txrate; u64 rx_duration; u64 tx_duration; u8 rssi_comb; + u8 link_id; struct ath12k_rx_peer_stats *rx_stats; struct ath12k_wbm_tx_stats *wbm_tx_stats; u32 bw_prev; }; +struct ath12k_sta { + enum hal_pn_type pn_type; + struct ath12k_link_sta deflink; + struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + /* indicates bitmap of link sta created in FW */ + u16 links_map; +}; + #define ATH12K_MIN_5G_FREQ 4150 #define ATH12K_MIN_6G_FREQ 5925 #define ATH12K_MAX_6G_FREQ 7115 @@ -561,13 +594,9 @@ struct ath12k { u32 chan_tx_pwr; u32 num_stations; u32 max_num_stations; - bool monitor_present; - /* To synchronize concurrent synchronous mac80211 callback operations, - * concurrent debugfs configuration and concurrent FW statistics events. - */ - struct mutex conf_mutex; + /* protects the radio specific data like debug stats, ppdu_stats_info stats, - * vdev_stop_status info, scan data, ath12k_sta info, ath12k_vif info, + * vdev_stop_status info, scan data, ath12k_sta info, ath12k_link_vif info, * channel context data, survey info, test mode data. */ spinlock_t data_lock; @@ -666,6 +695,7 @@ struct ath12k_hw { enum ath12k_hw_state state; bool regd_updated; bool use_6ghz_regd; + u8 num_radio; /* Keep last */ @@ -782,6 +812,10 @@ struct ath12k_base { /* HW channel counters frequency value in hertz common to all MACs */ u32 cc_freq_hz; + struct ath12k_dump_file_data *dump_data; + size_t ath12k_coredump_len; + struct work_struct dump_work; + struct ath12k_htc htc; struct ath12k_dp dp; @@ -1024,16 +1058,26 @@ static inline struct ath12k_skb_rxcb *ATH12K_SKB_RXCB(struct sk_buff *skb) return (struct ath12k_skb_rxcb *)skb->cb; } -static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif) +static inline struct ath12k_vif *ath12k_vif_to_ahvif(struct ieee80211_vif *vif) { return (struct ath12k_vif *)vif->drv_priv; } -static inline struct ath12k_sta *ath12k_sta_to_arsta(struct ieee80211_sta *sta) +static inline struct ath12k_sta *ath12k_sta_to_ahsta(struct ieee80211_sta *sta) { return (struct ath12k_sta *)sta->drv_priv; } +static inline struct ieee80211_sta *ath12k_ahsta_to_sta(struct ath12k_sta *ahsta) +{ + return container_of((void *)ahsta, struct ieee80211_sta, drv_priv); +} + +static inline struct ieee80211_vif *ath12k_ahvif_to_vif(struct ath12k_vif *ahvif) +{ + return container_of((void *)ahvif, struct ieee80211_vif, drv_priv); +} + static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab, int mac_id) { diff --git a/drivers/net/wireless/ath/ath12k/coredump.c b/drivers/net/wireless/ath/ath12k/coredump.c new file mode 100644 index 000000000000..72d675d15e64 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/coredump.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include <linux/devcoredump.h> +#include "hif.h" +#include "coredump.h" +#include "debug.h" + +enum +ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type(enum ath12k_qmi_target_mem type) +{ + enum ath12k_fw_crash_dump_type dump_type; + + switch (type) { + case HOST_DDR_REGION_TYPE: + dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA; + break; + case M3_DUMP_REGION_TYPE: + dump_type = FW_CRASH_DUMP_M3_DUMP; + break; + case PAGEABLE_MEM_REGION_TYPE: + dump_type = FW_CRASH_DUMP_PAGEABLE_DATA; + break; + case BDF_MEM_REGION_TYPE: + case CALDB_MEM_REGION_TYPE: + dump_type = FW_CRASH_DUMP_NONE; + break; + default: + dump_type = FW_CRASH_DUMP_TYPE_MAX; + break; + } + + return dump_type; +} + +void ath12k_coredump_upload(struct work_struct *work) +{ + struct ath12k_base *ab = container_of(work, struct ath12k_base, dump_work); + + ath12k_info(ab, "Uploading coredump\n"); + /* dev_coredumpv() takes ownership of the buffer */ + dev_coredumpv(ab->dev, ab->dump_data, ab->ath12k_coredump_len, GFP_KERNEL); + ab->dump_data = NULL; +} + +void ath12k_coredump_collect(struct ath12k_base *ab) +{ + ath12k_hif_coredump_download(ab); +} diff --git a/drivers/net/wireless/ath/ath12k/coredump.h b/drivers/net/wireless/ath/ath12k/coredump.h new file mode 100644 index 000000000000..5d6003b1c12d --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/coredump.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef _ATH12K_COREDUMP_H_ +#define _ATH12K_COREDUMP_H_ + +#define ATH12K_FW_CRASH_DUMP_V2 2 + +enum ath12k_fw_crash_dump_type { + FW_CRASH_DUMP_PAGING_DATA, + FW_CRASH_DUMP_RDDM_DATA, + FW_CRASH_DUMP_REMOTE_MEM_DATA, + FW_CRASH_DUMP_PAGEABLE_DATA, + FW_CRASH_DUMP_M3_DUMP, + FW_CRASH_DUMP_NONE, + + /* keep last */ + FW_CRASH_DUMP_TYPE_MAX, +}; + +#define COREDUMP_TLV_HDR_SIZE 8 + +struct ath12k_tlv_dump_data { + /* see ath11k_fw_crash_dump_type above */ + __le32 type; + + /* in bytes */ + __le32 tlv_len; + + /* pad to 32-bit boundaries as needed */ + u8 tlv_data[]; +} __packed; + +struct ath12k_dump_file_data { + /* "ATH12K-FW-DUMP" */ + char df_magic[16]; + /* total dump len in bytes */ + __le32 len; + /* file dump version */ + __le32 version; + /* pci device id */ + __le32 chip_id; + /* qrtr instance id */ + __le32 qrtr_id; + /* pci domain id */ + __le32 bus_id; + guid_t guid; + /* time-of-day stamp */ + __le64 tv_sec; + /* time-of-day stamp, nano-seconds */ + __le64 tv_nsec; + /* room for growth w/out changing binary format */ + u8 unused[128]; + u8 data[]; +} __packed; + +#ifdef CONFIG_ATH12K_COREDUMP +enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type + (enum ath12k_qmi_target_mem type); +void ath12k_coredump_upload(struct work_struct *work); +void ath12k_coredump_collect(struct ath12k_base *ab); +#else +static inline enum ath12k_fw_crash_dump_type ath12k_coredump_get_dump_type + (enum ath12k_qmi_target_mem type) +{ + return FW_CRASH_DUMP_TYPE_MAX; +} + +static inline void ath12k_coredump_upload(struct work_struct *work) +{ +} + +static inline void ath12k_coredump_collect(struct ath12k_base *ab) +{ +} +#endif + +#endif diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c index 2a977c36af00..d4b32d1a431c 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.c +++ b/drivers/net/wireless/ath/ath12k/debugfs.c @@ 
-15,14 +15,14 @@ static ssize_t ath12k_write_simulate_radar(struct file *file, struct ath12k *ar = file->private_data; int ret; - mutex_lock(&ar->conf_mutex); + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); ret = ath12k_wmi_simulate_radar(ar); if (ret) goto exit; ret = count; exit: - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); return ret; } diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c index f1b7e74aefe4..c9980c0193d1 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c +++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c @@ -12,8 +12,8 @@ #include "dp_rx.h" static u32 -print_array_to_buf(u8 *buf, u32 offset, const char *header, - const __le32 *array, u32 array_len, const char *footer) +print_array_to_buf_index(u8 *buf, u32 offset, const char *header, u32 stats_index, + const __le32 *array, u32 array_len, const char *footer) { int index = 0; u8 i; @@ -26,7 +26,7 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header, for (i = 0; i < array_len; i++) { index += scnprintf(buf + offset + index, (ATH12K_HTT_STATS_BUF_SIZE - offset) - index, - " %u:%u,", i, le32_to_cpu(array[i])); + " %u:%u,", stats_index++, le32_to_cpu(array[i])); } /* To overwrite the last trailing comma */ index--; @@ -40,6 +40,54 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header, return index; } +static u32 +print_array_to_buf(u8 *buf, u32 offset, const char *header, + const __le32 *array, u32 array_len, const char *footer) +{ + return print_array_to_buf_index(buf, offset, header, 0, array, array_len, + footer); +} + +static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size) +{ + switch (ru_size) { + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26: + return "26"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52: + return "52"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52_26: + return "52+26"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106: + return "106"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106_26: + return "106+26"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_242: + return "242"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484: + return "484"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484_242: + return "484+242"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996: + return "996"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484: + return "996+484"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484_242: + return "996+484+242"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2: + return "996x2"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2_484: + return "996x2+484"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3: + return "996x3"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3_484: + return "996x3+484"; + case ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x4: + return "996x4"; + default: + return "unknown"; + } +} + static void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) @@ -1447,6 +1495,1207 @@ ath12k_htt_print_tx_de_compl_stats_tlv(const void *tag_buf, u16 tag_len, stats_req->buf_len = len; } +static void +ath12k_htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + 
mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n", + le32_to_cpu(htt_stats_buf->su_bar)); + len += scnprintf(buf + len, buf_len - len, "rts = %u\n", + le32_to_cpu(htt_stats_buf->rts)); + len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n", + le32_to_cpu(htt_stats_buf->cts2self)); + len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n", + le32_to_cpu(htt_stats_buf->qos_null)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_1)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_2)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_3)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_4)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_5)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n", + le32_to_cpu(htt_stats_buf->delayed_bar_6)); + len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n", + le32_to_cpu(htt_stats_buf->delayed_bar_7)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->ac_su_ndpa)); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->ac_su_ndp)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndpa)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp)); + len += print_array_to_buf_index(buf, len, "ac_mu_mimo_brpollX_tried = ", 1, + htt_stats_buf->ac_mu_mimo_brpoll, + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->ax_su_ndpa)); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->ax_su_ndp)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndpa)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp)); + 
len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpollX_tried = ", 1, + htt_stats_buf->ax_mu_mimo_brpoll, + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, "\n"); + len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n", + le32_to_cpu(htt_stats_buf->ax_basic_trigger)); + len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger = %u\n", + le32_to_cpu(htt_stats_buf->ax_ulmumimo_trigger)); + len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n", + le32_to_cpu(htt_stats_buf->ax_bsr_trigger)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_bar_trigger)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n", + le32_to_cpu(htt_stats_buf->ax_mu_rts_trigger)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_be_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_be_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_BE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_queued = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndpa_queued)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndpa)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndp_queued = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndp_queued)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndp)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_queued = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_queued)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_tried = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_queued = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_queued)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_tried = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp)); + len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_queued = ", 1, + htt_stats_buf->be_mu_mimo_brpoll_queued, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1, + "\n"); + len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_tried = ", 1, + htt_stats_buf->be_mu_mimo_brpoll, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1, + "\n"); + len += print_array_to_buf(buf, len, "be_ul_mumimo_trigger = ", + htt_stats_buf->be_ul_mumimo_trigger, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n"); + len += scnprintf(buf + len, buf_len - len, "be_basic_trigger = %u\n", + le32_to_cpu(htt_stats_buf->be_basic_trigger)); + len += scnprintf(buf + len, buf_len - len, "be_ulmumimo_total_trigger = %u\n", + le32_to_cpu(htt_stats_buf->be_ulmumimo_trigger)); + len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger = %u\n", + le32_to_cpu(htt_stats_buf->be_bsr_trigger)); + len += scnprintf(buf + len, buf_len - len, "be_mu_bar_trigger = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_bar_trigger)); + len += scnprintf(buf + len, buf_len - len, "be_mu_rts_trigger = %u\n\n", + le32_to_cpu(htt_stats_buf->be_mu_rts_trigger)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct
ath12k_htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_su_ndp_err)); + len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_su_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_ndp_err)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp1_err)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp2_err)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_brp3_err)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_su_ndp_err)); + len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_su_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_mimo_ndp_err)); + len += print_array_to_buf_index(buf, len, "ax_mu_mimo_brpX_err", 1, + htt_stats_buf->ax_mu_mimo_brp_err, + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1, + "\n"); + len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_basic_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_total_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_ulmumimo_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_bsr_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_bar_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n", + le32_to_cpu(htt_stats_buf->ax_mu_rts_trigger_err)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_be_err_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_selfgen_be_err_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_BE_ERR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "be_su_ndp_err = %u\n", + 
le32_to_cpu(htt_stats_buf->be_su_ndp_err)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndp_flushed = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndp_flushed)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "be_su_ndpa_flushed = %u\n", + le32_to_cpu(htt_stats_buf->be_su_ndpa_flushed)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_err = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_err)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndpa_flushed = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndpa_flushed)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_err = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_err)); + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_ndp_flushed = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_mimo_ndp_flushed)); + len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpX_err", 1, + htt_stats_buf->be_mu_mimo_brp_err, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1, + "\n"); + len += print_array_to_buf_index(buf, len, "be_mu_mimo_brpollX_flushed", 1, + htt_stats_buf->be_mu_mimo_brpoll_flushed, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1, + "\n"); + len += print_array_to_buf(buf, len, "be_mu_mimo_num_cbf_rcvd_on_brp_err", + htt_stats_buf->be_mu_mimo_brp_err_num_cbf_rxd, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_ul_mumimo_trigger_err", + htt_stats_buf->be_ul_mumimo_trigger_err, + ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS, "\n"); + len += scnprintf(buf + len, buf_len - len, "be_basic_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->be_basic_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "be_ulmumimo_total_trig_err = %u\n", + le32_to_cpu(htt_stats_buf->be_ulmumimo_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "be_bsr_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->be_bsr_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "be_mu_bar_trigger_err = %u\n", + le32_to_cpu(htt_stats_buf->be_mu_bar_trigger_err)); + len += scnprintf(buf + len, buf_len - len, "be_mu_rts_trigger_err = %u\n\n", + le32_to_cpu(htt_stats_buf->be_mu_rts_trigger_err)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ac_sched_status_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats) +{ + const struct ath12k_htt_tx_selfgen_ac_sched_status_stats_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats->buf; + u32 len = stats->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_SELFGEN_AC_SCHED_STATUS_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "ac_su_ndpa_sch_status", + htt_stats_buf->ac_su_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_su_ndp_sch_status", + htt_stats_buf->ac_su_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_ndpa_sch_status", + htt_stats_buf->ac_mu_mimo_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_ndp_sch_status", + htt_stats_buf->ac_mu_mimo_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_brp_sch_status", + htt_stats_buf->ac_mu_mimo_brp_sch_status, + 
ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ac_su_ndp_sch_flag_err", + htt_stats_buf->ac_su_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_ndp_sch_flag_err", + htt_stats_buf->ac_mu_mimo_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_brp_sch_flag_err", + htt_stats_buf->ac_mu_mimo_brp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n"); + + stats->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_ax_sched_status_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats) +{ + const struct ath12k_htt_tx_selfgen_ax_sched_status_stats_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats->buf; + u32 len = stats->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_SELFGEN_AX_SCHED_STATUS_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "ax_su_ndpa_sch_status", + htt_stats_buf->ax_su_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_su_ndp_sch_status", + htt_stats_buf->ax_su_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_mimo_ndpa_sch_status", + htt_stats_buf->ax_mu_mimo_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_mimo_ndp_sch_status", + htt_stats_buf->ax_mu_mimo_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_brp_sch_status", + htt_stats_buf->ax_mu_brp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_bar_sch_status", + htt_stats_buf->ax_mu_bar_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_basic_trig_sch_status", + htt_stats_buf->ax_basic_trig_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_su_ndp_sch_flag_err", + htt_stats_buf->ax_su_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_mimo_ndp_sch_flag_err", + htt_stats_buf->ax_mu_mimo_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_brp_sch_flag_err", + htt_stats_buf->ax_mu_brp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_bar_sch_flag_err", + htt_stats_buf->ax_mu_bar_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_basic_trig_sch_flag_err", + htt_stats_buf->ax_basic_trig_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "ax_ulmumimo_trig_sch_status", + htt_stats_buf->ax_ulmumimo_trig_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "ax_ulmumimo_trig_sch_flag_err", + htt_stats_buf->ax_ulmumimo_trig_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n"); + + stats->buf_len = len; +} + +static void +ath12k_htt_print_tx_selfgen_be_sched_status_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats) +{ + const struct ath12k_htt_tx_selfgen_be_sched_status_stats_tlv 
*htt_stats_buf = + tag_buf; + u8 *buf = stats->buf; + u32 len = stats->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_SELFGEN_BE_SCHED_STATUS_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "be_su_ndpa_sch_status", + htt_stats_buf->be_su_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_su_ndp_sch_status", + htt_stats_buf->be_su_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_mimo_ndpa_sch_status", + htt_stats_buf->be_mu_mimo_ndpa_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_mimo_ndp_sch_status", + htt_stats_buf->be_mu_mimo_ndp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_brp_sch_status", + htt_stats_buf->be_mu_brp_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_bar_sch_status", + htt_stats_buf->be_mu_bar_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_basic_trig_sch_status", + htt_stats_buf->be_basic_trig_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_su_ndp_sch_flag_err", + htt_stats_buf->be_su_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_mimo_ndp_sch_flag_err", + htt_stats_buf->be_mu_mimo_ndp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_brp_sch_flag_err", + htt_stats_buf->be_mu_brp_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_mu_bar_sch_flag_err", + htt_stats_buf->be_mu_bar_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_basic_trig_sch_flag_err", + htt_stats_buf->be_basic_trig_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n"); + len += print_array_to_buf(buf, len, "be_ulmumimo_trig_sch_status", + htt_stats_buf->be_ulmumimo_trig_sch_status, + ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS, "\n"); + len += print_array_to_buf(buf, len, "be_ulmumimo_trig_sch_flag_err", + htt_stats_buf->be_ulmumimo_trig_sch_flag_err, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS, "\n\n"); + + stats->buf_len = len; +} + +static void +ath12k_htt_print_stats_string_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_stats_string_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 i; + u16 index = 0; + u32 datum; + char data[ATH12K_HTT_MAX_STRING_LEN] = {0}; + + tag_len = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n"); + for (i = 0; i < tag_len; i++) { + datum = __le32_to_cpu(htt_stats_buf->data[i]); + index += scnprintf(&data[index], ATH12K_HTT_MAX_STRING_LEN - index, + "%.*s", 4, (char *)&datum); + if (index >= ATH12K_HTT_MAX_STRING_LEN) + break; + } + len += scnprintf(buf + len, buf_len - len, "data = %s\n\n", data); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sring_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sring_stats_tlv
*htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + u32 avail_words; + u32 head_tail_ptr; + u32 sring_stat; + u32 tail_ptr; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__ring_id__arena__ep); + avail_words = __le32_to_cpu(htt_stats_buf->num_avail_words__num_valid_words); + head_tail_ptr = __le32_to_cpu(htt_stats_buf->head_ptr__tail_ptr); + sring_stat = __le32_to_cpu(htt_stats_buf->consumer_empty__producer_full); + tail_ptr = __le32_to_cpu(htt_stats_buf->prefetch_count__internal_tail_ptr); + + len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "ring_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_RING_ID)); + len += scnprintf(buf + len, buf_len - len, "arena = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_ARENA)); + len += scnprintf(buf + len, buf_len - len, "ep = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_SRING_STATS_EP)); + len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n", + le32_to_cpu(htt_stats_buf->base_addr_lsb)); + len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n", + le32_to_cpu(htt_stats_buf->base_addr_msb)); + len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n", + le32_to_cpu(htt_stats_buf->ring_size)); + len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n", + le32_to_cpu(htt_stats_buf->elem_size)); + len += scnprintf(buf + len, buf_len - len, "num_avail_words = %u\n", + u32_get_bits(avail_words, + ATH12K_HTT_SRING_STATS_NUM_AVAIL_WORDS)); + len += scnprintf(buf + len, buf_len - len, "num_valid_words = %u\n", + u32_get_bits(avail_words, + ATH12K_HTT_SRING_STATS_NUM_VALID_WORDS)); + len += scnprintf(buf + len, buf_len - len, "head_ptr = %u\n", + u32_get_bits(head_tail_ptr, ATH12K_HTT_SRING_STATS_HEAD_PTR)); + len += scnprintf(buf + len, buf_len - len, "tail_ptr = %u\n", + u32_get_bits(head_tail_ptr, ATH12K_HTT_SRING_STATS_TAIL_PTR)); + len += scnprintf(buf + len, buf_len - len, "consumer_empty = %u\n", + u32_get_bits(sring_stat, + ATH12K_HTT_SRING_STATS_CONSUMER_EMPTY)); + len += scnprintf(buf + len, buf_len - len, "producer_full = %u\n", + u32_get_bits(sring_stat, + ATH12K_HTT_SRING_STATS_PRODUCER_FULL)); + len += scnprintf(buf + len, buf_len - len, "prefetch_count = %u\n", + u32_get_bits(tail_ptr, ATH12K_HTT_SRING_STATS_PREFETCH_COUNT)); + len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %u\n\n", + u32_get_bits(tail_ptr, + ATH12K_HTT_SRING_STATS_INTERNAL_TAIL_PTR)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sfm_cmn_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sfm_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n", + le32_to_cpu(htt_stats_buf->buf_total)); + len += scnprintf(buf + len, buf_len - 
len, "mem_empty = %u\n", + le32_to_cpu(htt_stats_buf->mem_empty)); + len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n", + le32_to_cpu(htt_stats_buf->deallocate_bufs)); + len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n", + le32_to_cpu(htt_stats_buf->num_records)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sfm_client_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sfm_client_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "client_id = %u\n", + le32_to_cpu(htt_stats_buf->client_id)); + len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n", + le32_to_cpu(htt_stats_buf->buf_min)); + len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n", + le32_to_cpu(htt_stats_buf->buf_max)); + len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n", + le32_to_cpu(htt_stats_buf->buf_busy)); + len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n", + le32_to_cpu(htt_stats_buf->buf_alloc)); + len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n", + le32_to_cpu(htt_stats_buf->buf_avail)); + len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n", + le32_to_cpu(htt_stats_buf->num_users)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sfm_client_user_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sfm_client_user_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV:\n"); + len += print_array_to_buf(buf, len, "dwords_used_by_user_n", + htt_stats_buf->dwords_used_by_user_n, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 i; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n", + le32_to_cpu(htt_stats_buf->mu_mimo_sch_posted)); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n", + le32_to_cpu(htt_stats_buf->mu_mimo_sch_failed)); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n", + le32_to_cpu(htt_stats_buf->mu_mimo_ppdu_posted)); + len += scnprintf(buf + len, buf_len - len, + "\nac_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0, + le32_to_cpu(htt_stats_buf->ac_mu_mimo_per_grp_sz[0])); + for (i = 1; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_sch_posted_per_group_index %u ", i); + len += scnprintf(buf + len, buf_len - len, + "(TOTAL STREAMS = %u) = %u\n", i + 1, + le32_to_cpu(htt_stats_buf->ac_mu_mimo_per_grp_sz[i])); + } + + for (i = 0; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + 
"ac_mu_mimo_sch_posted_per_group_index %u ", + i + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS); + len += scnprintf(buf + len, buf_len - len, + "(TOTAL STREAMS = %u) = %u\n", + i + ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS + 1, + le32_to_cpu(htt_stats_buf->ac_mu_mimo_grp_sz_ext[i])); + } + + len += scnprintf(buf + len, buf_len - len, + "\nax_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0, + le32_to_cpu(htt_stats_buf->ax_mu_mimo_per_grp_sz[0])); + for (i = 1; i < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_sch_posted_per_group_index %u ", i); + len += scnprintf(buf + len, buf_len - len, + "(TOTAL STREAMS = %u) = %u\n", i + 1, + le32_to_cpu(htt_stats_buf->ax_mu_mimo_per_grp_sz[i])); + } + + len += scnprintf(buf + len, buf_len - len, + "\nbe_mu_mimo_sch_posted_per_group_index %u (SU) = %u\n", 0, + le32_to_cpu(htt_stats_buf->be_mu_mimo_per_grp_sz[0])); + for (i = 1; i < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "be_mu_mimo_sch_posted_per_group_index %u ", i); + len += scnprintf(buf + len, buf_len - len, + "(TOTAL STREAMS = %u) = %u\n", i + 1, + le32_to_cpu(htt_stats_buf->be_mu_mimo_per_grp_sz[i])); + } + + len += scnprintf(buf + len, buf_len - len, "\n11ac MU_MIMO SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_sch_nusers_"); + len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ac_mu_mimo_sch_nusers[i])); + } + + len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_sch_nusers_"); + len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_mu_mimo_sch_nusers[i])); + } + + len += scnprintf(buf + len, buf_len - len, "\n11be MU_MIMO SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, "be_mu_mimo_sch_nusers_"); + len += scnprintf(buf + len, buf_len - len, "%u = %u\n", i, + le32_to_cpu(htt_stats_buf->be_mu_mimo_sch_nusers[i])); + } + + len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_OFDMA_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_ofdma_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ofdma_sch_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_ofdma_basic_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_ofdma_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_ofdma_bsr_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_ofdma_bar_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_ofdma_bar_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_ofdma_brp_sch_nusers_%u = %u\n\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_ofdma_brp_nusers[i])); + } + + len += scnprintf(buf + len, buf_len - len, "11ax UL MUMIMO SCH STATS:\n"); + for (i = 0; i < ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS; i++) { + len += scnprintf(buf + len, buf_len - len, + "ax_ul_mumimo_basic_sch_nusers_%u = %u\n", i, + le32_to_cpu(htt_stats_buf->ax_ul_mumimo_nusers[i])); + len += scnprintf(buf + len, buf_len - len, + "ax_ul_mumimo_brp_sch_nusers_%u = %u\n\n", i, + 
le32_to_cpu(htt_stats_buf->ax_ul_mumimo_brp_nusers[i])); + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_mumimo_grp_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + int j; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_MUMIMO_GRP_STATS:\n"); + len += print_array_to_buf(buf, len, + "dl_mumimo_grp_tputs_observed (per bin = 300 mbps)", + htt_stats_buf->dl_mumimo_grp_tputs, + ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n"); + len += print_array_to_buf(buf, len, "dl_mumimo_grp eligible", + htt_stats_buf->dl_mumimo_grp_eligible, + ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n"); + len += print_array_to_buf(buf, len, "dl_mumimo_grp_ineligible", + htt_stats_buf->dl_mumimo_grp_ineligible, + ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n"); + len += scnprintf(buf + len, buf_len - len, "dl_mumimo_grp_invalid:\n"); + for (j = 0; j < ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ; j++) { + len += scnprintf(buf + len, buf_len - len, "grp_id = %u", j); + len += print_array_to_buf(buf, len, "", + htt_stats_buf->dl_mumimo_grp_invalid, + ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE, + "\n"); + } + + len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size", + htt_stats_buf->ul_mumimo_grp_best_grp_size, + ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n"); + len += print_array_to_buf_index(buf, len, "ul_mumimo_grp_best_num_usrs = ", 1, + htt_stats_buf->ul_mumimo_grp_best_usrs, + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n"); + len += print_array_to_buf(buf, len, + "ul_mumimo_grp_tputs_observed (per bin = 300 mbps)", + htt_stats_buf->ul_mumimo_grp_tputs, + ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 user_index; + u32 tx_sched_mode; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + user_index = __le32_to_cpu(htt_stats_buf->user_index); + tx_sched_mode = __le32_to_cpu(htt_stats_buf->tx_sched_mode); + + if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) { + if (!user_index) + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n"); + + if (user_index < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS) { + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_queued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_queued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_tried_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_tried_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_failed_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_failed_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_requeued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_err_no_ba_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->err_no_ba_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n", + 
user_index, + le32_to_cpu(htt_stats_buf->mpdu_underrun_usr)); + len += scnprintf(buf + len, buf_len - len, + "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n", + user_index, + le32_to_cpu(htt_stats_buf->ampdu_underrun_usr)); + } + } + + if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) { + if (!user_index) + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n"); + + if (user_index < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS) { + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_queued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_queued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_tried_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_tried_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_failed_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_failed_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_requeued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_err_no_ba_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->err_no_ba_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdu_underrun_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n", + user_index, + le32_to_cpu(htt_stats_buf->ampdu_underrun_usr)); + } + } + + if (tx_sched_mode == ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) { + if (!user_index) + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n"); + + if (user_index < ATH12K_HTT_TX_NUM_OFDMA_USER_STATS) { + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_queued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_tried_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_failed_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdus_requeued_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_err_no_ba_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->err_no_ba_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->mpdu_underrun_usr)); + len += scnprintf(buf + len, buf_len - len, + "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n", + user_index, + le32_to_cpu(htt_stats_buf->ampdu_underrun_usr)); + } + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_CCA_STATS_HIST_TLV :\n"); + len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n", + le32_to_cpu(htt_stats_buf->chan_num)); + len += scnprintf(buf + len, buf_len - len, "num_records = %u\n", + le32_to_cpu(htt_stats_buf->num_records)); + 
len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n", + le32_to_cpu(htt_stats_buf->valid_cca_counters_bitmap)); + len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n", + le32_to_cpu(htt_stats_buf->collection_interval)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n"); + len += scnprintf(buf + len, buf_len - len, "tx_frame_usec = %u\n", + le32_to_cpu(htt_stats_buf->tx_frame_usec)); + len += scnprintf(buf + len, buf_len - len, "rx_frame_usec = %u\n", + le32_to_cpu(htt_stats_buf->rx_frame_usec)); + len += scnprintf(buf + len, buf_len - len, "rx_clear_usec = %u\n", + le32_to_cpu(htt_stats_buf->rx_clear_usec)); + len += scnprintf(buf + len, buf_len - len, "my_rx_frame_usec = %u\n", + le32_to_cpu(htt_stats_buf->my_rx_frame_usec)); + len += scnprintf(buf + len, buf_len - len, "usec_cnt = %u\n", + le32_to_cpu(htt_stats_buf->usec_cnt)); + len += scnprintf(buf + len, buf_len - len, "med_rx_idle_usec = %u\n", + le32_to_cpu(htt_stats_buf->med_rx_idle_usec)); + len += scnprintf(buf + len, buf_len - len, "med_tx_idle_global_usec = %u\n", + le32_to_cpu(htt_stats_buf->med_tx_idle_global_usec)); + len += scnprintf(buf + len, buf_len - len, "cca_obss_usec = %u\n\n", + le32_to_cpu(htt_stats_buf->cca_obss_usec)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 i; + static const char *access_cat_names[ATH12K_HTT_NUM_AC_WMM] = {"best effort", + "background", + "video", "voice"}; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_OBSS_PD_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "num_spatial_reuse_tx = %u\n", + le32_to_cpu(htt_stats_buf->num_sr_tx_transmissions)); + len += scnprintf(buf + len, buf_len - len, + "num_spatial_reuse_opportunities = %u\n", + le32_to_cpu(htt_stats_buf->num_spatial_reuse_opportunities)); + len += scnprintf(buf + len, buf_len - len, "num_non_srg_opportunities = %u\n", + le32_to_cpu(htt_stats_buf->num_non_srg_opportunities)); + len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->num_non_srg_ppdu_tried)); + len += scnprintf(buf + len, buf_len - len, "num_non_srg_ppdu_success = %u\n", + le32_to_cpu(htt_stats_buf->num_non_srg_ppdu_success)); + len += scnprintf(buf + len, buf_len - len, "num_srg_opportunities = %u\n", + le32_to_cpu(htt_stats_buf->num_srg_opportunities)); + len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->num_srg_ppdu_tried)); + len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_success = %u\n", + le32_to_cpu(htt_stats_buf->num_srg_ppdu_success)); + len += scnprintf(buf + len, buf_len - len, "num_psr_opportunities = %u\n", + le32_to_cpu(htt_stats_buf->num_psr_opportunities)); + len += scnprintf(buf + 
len, buf_len - len, "num_psr_ppdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->num_psr_ppdu_tried)); + len += scnprintf(buf + len, buf_len - len, "num_psr_ppdu_success = %u\n", + le32_to_cpu(htt_stats_buf->num_psr_ppdu_success)); + len += scnprintf(buf + len, buf_len - len, "min_duration_check_flush_cnt = %u\n", + le32_to_cpu(htt_stats_buf->num_obss_min_dur_check_flush_cnt)); + len += scnprintf(buf + len, buf_len - len, "sr_ppdu_abort_flush_cnt = %u\n\n", + le32_to_cpu(htt_stats_buf->num_sr_ppdu_abort_flush_cnt)); + + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_OBSS_PD_PER_AC_STATS:\n"); + for (i = 0; i < ATH12K_HTT_NUM_AC_WMM; i++) { + len += scnprintf(buf + len, buf_len - len, "Access Category %u (%s)\n", + i, access_cat_names[i]); + len += scnprintf(buf + len, buf_len - len, + "num_non_srg_ppdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->num_non_srg_tried_per_ac[i])); + len += scnprintf(buf + len, buf_len - len, + "num_non_srg_ppdu_success = %u\n", + le32_to_cpu(htt_stats_buf->num_non_srg_success_ac[i])); + len += scnprintf(buf + len, buf_len - len, "num_srg_ppdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->num_srg_tried_per_ac[i])); + len += scnprintf(buf + len, buf_len - len, + "num_srg_ppdu_success = %u\n\n", + le32_to_cpu(htt_stats_buf->num_srg_success_per_ac[i])); + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_dmac_reset_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u64 time; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_DMAC_RESET_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "reset_count = %u\n", + le32_to_cpu(htt_stats_buf->reset_count)); + time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms, + htt_stats_buf->reset_time_lo_ms); + len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time); + + time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms, + htt_stats_buf->disengage_time_lo_ms); + len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time); + + time = ath12k_le32hilo_to_u64(htt_stats_buf->engage_time_hi_ms, + htt_stats_buf->engage_time_lo_ms); + len += scnprintf(buf + len, buf_len - len, "engage_time_ms = %llu\n", time); + + len += scnprintf(buf + len, buf_len - len, "disengage_count = %u\n", + le32_to_cpu(htt_stats_buf->disengage_count)); + len += scnprintf(buf + len, buf_len - len, "engage_count = %u\n", + le32_to_cpu(htt_stats_buf->engage_count)); + len += scnprintf(buf + len, buf_len - len, "drain_dest_ring_mask = 0x%x\n\n", + le32_to_cpu(htt_stats_buf->drain_dest_ring_mask)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_pdev_sched_algo_ofdma_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_SCHED_ALGO_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += print_array_to_buf(buf, len, 
"rate_based_dlofdma_enabled_count", + htt_stats_buf->rate_based_dlofdma_enabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "rate_based_dlofdma_disabled_count", + htt_stats_buf->rate_based_dlofdma_disabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "rate_based_dlofdma_probing_count", + htt_stats_buf->rate_based_dlofdma_disabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "rate_based_dlofdma_monitoring_count", + htt_stats_buf->rate_based_dlofdma_monitor_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_enabled_count", + htt_stats_buf->chan_acc_lat_based_dlofdma_enabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_disabled_count", + htt_stats_buf->chan_acc_lat_based_dlofdma_disabled_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "chan_acc_lat_based_dlofdma_monitoring_count", + htt_stats_buf->chan_acc_lat_based_dlofdma_monitor_cnt, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "downgrade_to_dl_su_ru_alloc_fail", + htt_stats_buf->downgrade_to_dl_su_ru_alloc_fail, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "candidate_list_single_user_disable_ofdma", + htt_stats_buf->candidate_list_single_user_disable_ofdma, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "dl_cand_list_dropped_high_ul_qos_weight", + htt_stats_buf->dl_cand_list_dropped_high_ul_qos_weight, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "ax_dlofdma_disabled_due_to_pipelining", + htt_stats_buf->ax_dlofdma_disabled_due_to_pipelining, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "dlofdma_disabled_su_only_eligible", + htt_stats_buf->dlofdma_disabled_su_only_eligible, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "dlofdma_disabled_consec_no_mpdus_tried", + htt_stats_buf->dlofdma_disabled_consec_no_mpdus_tried, + ATH12K_HTT_NUM_AC_WMM, "\n"); + len += print_array_to_buf(buf, len, "dlofdma_disabled_consec_no_mpdus_success", + htt_stats_buf->dlofdma_disabled_consec_no_mpdus_success, + ATH12K_HTT_NUM_AC_WMM, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + u8 i; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_RATE_STATS_BE_OFDMA_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "be_ofdma_tx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->be_ofdma_tx_ldpc)); + len += print_array_to_buf(buf, len, "be_ofdma_tx_mcs", + htt_stats_buf->be_ofdma_tx_mcs, + ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS, "\n"); + len += print_array_to_buf(buf, len, "be_ofdma_eht_sig_mcs", + htt_stats_buf->be_ofdma_eht_sig_mcs, + ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS, "\n"); + len += scnprintf(buf + len, buf_len - len, "be_ofdma_tx_ru_size = "); + for (i = 0; i < ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS; i++) + len += 
scnprintf(buf + len, buf_len - len, " %s:%u ", + ath12k_htt_be_tx_rx_ru_size_to_str(i), + le32_to_cpu(htt_stats_buf->be_ofdma_tx_ru_size[i])); + len += scnprintf(buf + len, buf_len - len, "\n"); + len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1, + htt_stats_buf->be_ofdma_tx_nss, + ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, + "\n"); + len += print_array_to_buf(buf, len, "be_ofdma_tx_bw", + htt_stats_buf->be_ofdma_tx_bw, + ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS, "\n"); + for (i = 0; i < ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS; i++) { + len += scnprintf(buf + len, buf_len - len, + "be_ofdma_tx_gi[%u]", i); + len += print_array_to_buf(buf, len, "", htt_stats_buf->gi[i], + ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS, "\n"); + } + len += scnprintf(buf + len, buf_len - len, "\n"); + + stats_req->buf_len = len; +} + static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, u16 tag, u16 len, const void *tag_buf, void *user_data) @@ -1552,6 +2801,83 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, case HTT_STATS_TX_DE_COMPL_STATS_TAG: ath12k_htt_print_tx_de_compl_stats_tlv(tag_buf, len, stats_req); break; + case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG: + ath12k_htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AC_STATS_TAG: + ath12k_htt_print_tx_selfgen_ac_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AX_STATS_TAG: + ath12k_htt_print_tx_selfgen_ax_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_BE_STATS_TAG: + ath12k_htt_print_tx_selfgen_be_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG: + ath12k_htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG: + ath12k_htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG: + ath12k_htt_print_tx_selfgen_be_err_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG: + ath12k_htt_print_tx_selfgen_ac_sched_status_stats_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG: + ath12k_htt_print_tx_selfgen_ax_sched_status_stats_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG: + ath12k_htt_print_tx_selfgen_be_sched_status_stats_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_STRING_TAG: + ath12k_htt_print_stats_string_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SRING_STATS_TAG: + ath12k_htt_print_sring_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SFM_CMN_TAG: + ath12k_htt_print_sfm_cmn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SFM_CLIENT_TAG: + ath12k_htt_print_sfm_client_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SFM_CLIENT_USER_TAG: + ath12k_htt_print_sfm_client_user_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG: + ath12k_htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG: + ath12k_htt_print_tx_pdev_mumimo_grp_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_MPDU_STATS_TAG: + ath12k_htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG: + case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG: + case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG: + ath12k_htt_print_pdev_cca_stats_hist_tlv(tag_buf, len, stats_req); + 
break; + case HTT_STATS_PDEV_CCA_COUNTERS_TAG: + ath12k_htt_print_pdev_stats_cca_counters_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_OBSS_PD_TAG: + ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_DMAC_RESET_STATS_TAG: + ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG: + ath12k_htt_print_pdev_sched_algo_ofdma_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG: + ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, len, stats_req); + break; default: break; } @@ -1627,9 +2953,9 @@ static ssize_t ath12k_read_htt_stats_type(struct file *file, char buf[32]; size_t len; - mutex_lock(&ar->conf_mutex); + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); type = ar->debug.htt_stats.type; - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); len = scnprintf(buf, sizeof(buf), "%u\n", type); @@ -1662,7 +2988,7 @@ static ssize_t ath12k_write_htt_stats_type(struct file *file, type >= ATH12K_DBG_HTT_NUM_EXT_STATS) return -EINVAL; - mutex_lock(&ar->conf_mutex); + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); ar->debug.htt_stats.type = type; ar->debug.htt_stats.cfg_param[0] = cfg_param[0]; @@ -1670,7 +2996,7 @@ static ssize_t ath12k_write_htt_stats_type(struct file *file, ar->debug.htt_stats.cfg_param[2] = cfg_param[2]; ar->debug.htt_stats.cfg_param[3] = cfg_param[3]; - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); return count; } @@ -1691,7 +3017,7 @@ static int ath12k_debugfs_htt_stats_req(struct ath12k *ar) int ret, pdev_id; struct htt_ext_stats_cfg_params cfg_params = { 0 }; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); init_completion(&stats_req->htt_stats_rcvd); @@ -1741,7 +3067,7 @@ static int ath12k_open_htt_stats(struct inode *inode, if (type == ATH12K_DBG_HTT_EXT_STATS_RESET) return -EPERM; - mutex_lock(&ar->conf_mutex); + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); if (ah->state != ATH12K_HW_STATE_ON) { ret = -ENETDOWN; @@ -1776,14 +3102,14 @@ static int ath12k_open_htt_stats(struct inode *inode, file->private_data = stats_req; - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); return 0; out: kfree(stats_req); ar->debug.htt_stats.stats_req = NULL; err_unlock: - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); return ret; } @@ -1793,10 +3119,10 @@ static int ath12k_release_htt_stats(struct inode *inode, { struct ath12k *ar = inode->i_private; - mutex_lock(&ar->conf_mutex); + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); kfree(file->private_data); ar->debug.htt_stats.stats_req = NULL; - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); return 0; } @@ -1840,7 +3166,7 @@ static ssize_t ath12k_write_htt_stats_reset(struct file *file, type == ATH12K_DBG_HTT_EXT_STATS_RESET) return -E2BIG; - mutex_lock(&ar->conf_mutex); + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET; param_pos = (type >> 5) + 1; @@ -1866,12 +3192,12 @@ static ssize_t ath12k_write_htt_stats_reset(struct file *file, 0ULL); if (ret) { ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret); - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); return ret; } ar->debug.htt_stats.reset = type; - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); return count; } diff --git 
a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h index d52b26b23e65..ac86cab234ec 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h +++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h @@ -123,12 +123,21 @@ struct ath12k_htt_extd_stats_msg { /* htt_dbg_ext_stats_type */ enum ath12k_dbg_htt_ext_stats_type { - ATH12K_DBG_HTT_EXT_STATS_RESET = 0, - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1, - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, - ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, - ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, - ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8, + ATH12K_DBG_HTT_EXT_STATS_RESET = 0, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, + ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, + ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8, + ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12, + ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15, + ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17, + ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19, + ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, + ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45, + ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49, + ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51, /* keep this last */ ATH12K_DBG_HTT_NUM_EXT_STATS, @@ -139,6 +148,7 @@ enum ath12k_dbg_htt_tlv_tag { HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1, HTT_STATS_TX_PDEV_SIFS_TAG = 2, HTT_STATS_TX_PDEV_FLUSH_TAG = 3, + HTT_STATS_STRING_TAG = 5, HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11, HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12, HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13, @@ -151,22 +161,47 @@ enum ath12k_dbg_htt_tlv_tag { HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21, HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22, HTT_STATS_TX_DE_CMN_TAG = 23, + HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25, + HTT_STATS_SFM_CMN_TAG = 26, + HTT_STATS_SRING_STATS_TAG = 27, HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36, HTT_STATS_TX_SCHED_CMN_TAG = 37, HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39, + HTT_STATS_SFM_CLIENT_USER_TAG = 41, + HTT_STATS_SFM_CLIENT_TAG = 42, HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43, HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44, + HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG = 46, + HTT_STATS_TX_SELFGEN_CMN_STATS_TAG = 47, + HTT_STATS_TX_SELFGEN_AC_STATS_TAG = 48, + HTT_STATS_TX_SELFGEN_AX_STATS_TAG = 49, + HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG = 50, HTT_STATS_HW_INTR_MISC_TAG = 54, HTT_STATS_HW_PDEV_ERRS_TAG = 56, HTT_STATS_TX_DE_COMPL_STATS_TAG = 65, HTT_STATS_WHAL_TX_TAG = 66, HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67, + HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70, + HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71, + HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72, + HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73, + HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74, HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86, HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87, + HTT_STATS_PDEV_OBSS_PD_TAG = 88, HTT_STATS_HW_WAR_TAG = 89, HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100, HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102, + HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111, + HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112, HTT_STATS_MU_PPDU_DIST_TAG = 129, + HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG = 130, + HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG = 135, + HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG = 137, + HTT_STATS_TX_SELFGEN_BE_STATS_TAG = 138, + HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG = 139, + HTT_STATS_DMAC_RESET_STATS_TAG = 155, + HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG = 165, 
HTT_STATS_MAX_TAG, }; @@ -690,4 +725,401 @@ struct ath12k_htt_tx_de_compl_stats_tlv { __le32 tqm_bypass_frame; } __packed; +enum ath12k_htt_tx_mumimo_grp_invalid_reason_code_stats { + ATH12K_HTT_TX_MUMIMO_GRP_VALID, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_NUM_MU_USERS_EXCEEDED_MU_MAX_USERS, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_SCHED_ALGO_NOT_MU_COMPATIBLE_GID, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_NON_PRIMARY_GRP, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_ZERO_CANDIDATES, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MORE_CANDIDATES, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_SIZE_EXCEED_NSS, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_INELIGIBLE, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_GROUP_EFF_MU_TPUT_OMBPS, + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE, +}; + +#define ATH12K_HTT_NUM_AC_WMM 0x4 +#define ATH12K_HTT_MAX_NUM_SBT_INTR 4 +#define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS 4 +#define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS 8 +#define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS 8 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS 7 +#define ATH12K_HTT_TX_NUM_OFDMA_USER_STATS 74 +#define ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS 8 +#define ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ 8 +#define ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS 10 + +#define ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE \ + ATH12K_HTT_TX_MUMIMO_GRP_INVALID_MAX_REASON_CODE +#define ATH12K_HTT_TX_NUM_MUMIMO_GRP_INVALID_WORDS \ + (ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ * ATH12K_HTT_STATS_MAX_INVALID_REASON_CODE) + +struct ath12k_htt_tx_selfgen_cmn_stats_tlv { + __le32 mac_id__word; + __le32 su_bar; + __le32 rts; + __le32 cts2self; + __le32 qos_null; + __le32 delayed_bar_1; + __le32 delayed_bar_2; + __le32 delayed_bar_3; + __le32 delayed_bar_4; + __le32 delayed_bar_5; + __le32 delayed_bar_6; + __le32 delayed_bar_7; +} __packed; + +struct ath12k_htt_tx_selfgen_ac_stats_tlv { + __le32 ac_su_ndpa; + __le32 ac_su_ndp; + __le32 ac_mu_mimo_ndpa; + __le32 ac_mu_mimo_ndp; + __le32 ac_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS - 1]; +} __packed; + +struct ath12k_htt_tx_selfgen_ax_stats_tlv { + __le32 ax_su_ndpa; + __le32 ax_su_ndp; + __le32 ax_mu_mimo_ndpa; + __le32 ax_mu_mimo_ndp; + __le32 ax_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1]; + __le32 ax_basic_trigger; + __le32 ax_bsr_trigger; + __le32 ax_mu_bar_trigger; + __le32 ax_mu_rts_trigger; + __le32 ax_ulmumimo_trigger; +} __packed; + +struct ath12k_htt_tx_selfgen_be_stats_tlv { + __le32 be_su_ndpa; + __le32 be_su_ndp; + __le32 be_mu_mimo_ndpa; + __le32 be_mu_mimo_ndp; + __le32 be_mu_mimo_brpoll[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1]; + __le32 be_basic_trigger; + __le32 be_bsr_trigger; + __le32 be_mu_bar_trigger; + __le32 be_mu_rts_trigger; + __le32 be_ulmumimo_trigger; + __le32 be_su_ndpa_queued; + __le32 be_su_ndp_queued; + __le32 be_mu_mimo_ndpa_queued; + __le32 be_mu_mimo_ndp_queued; + __le32 be_mu_mimo_brpoll_queued[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1]; + __le32 be_ul_mumimo_trigger[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS]; +} __packed; + +struct ath12k_htt_tx_selfgen_ac_err_stats_tlv { + __le32 ac_su_ndp_err; + __le32 ac_su_ndpa_err; + __le32 ac_mu_mimo_ndpa_err; + __le32 ac_mu_mimo_ndp_err; + __le32 ac_mu_mimo_brp1_err; + __le32 ac_mu_mimo_brp2_err; + __le32 ac_mu_mimo_brp3_err; +} __packed; + +struct ath12k_htt_tx_selfgen_ax_err_stats_tlv { + __le32 ax_su_ndp_err; + __le32 ax_su_ndpa_err; + __le32 ax_mu_mimo_ndpa_err; + __le32 ax_mu_mimo_ndp_err; + __le32 ax_mu_mimo_brp_err[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS - 1]; + __le32 ax_basic_trigger_err; + 
__le32 ax_bsr_trigger_err; + __le32 ax_mu_bar_trigger_err; + __le32 ax_mu_rts_trigger_err; + __le32 ax_ulmumimo_trigger_err; +} __packed; + +struct ath12k_htt_tx_selfgen_be_err_stats_tlv { + __le32 be_su_ndp_err; + __le32 be_su_ndpa_err; + __le32 be_mu_mimo_ndpa_err; + __le32 be_mu_mimo_ndp_err; + __le32 be_mu_mimo_brp_err[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1]; + __le32 be_basic_trigger_err; + __le32 be_bsr_trigger_err; + __le32 be_mu_bar_trigger_err; + __le32 be_mu_rts_trigger_err; + __le32 be_ulmumimo_trigger_err; + __le32 be_mu_mimo_brp_err_num_cbf_rxd[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS]; + __le32 be_su_ndpa_flushed; + __le32 be_su_ndp_flushed; + __le32 be_mu_mimo_ndpa_flushed; + __le32 be_mu_mimo_ndp_flushed; + __le32 be_mu_mimo_brpoll_flushed[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS - 1]; + __le32 be_ul_mumimo_trigger_err[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS]; +} __packed; + +enum ath12k_htt_tx_selfgen_sch_tsflag_error_stats { + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_FLUSH_RCVD_ERR, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_FILT_SCHED_CMD_ERR, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_MISMATCH_ERR, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_MIMO_CTRL_MISMATCH_ERR, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_CBF_BW_MISMATCH_ERR, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RETRY_COUNT_FAIL_ERR, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_RESP_TOO_LATE_RECEIVED_ERR, + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_SIFS_STALL_NO_NEXT_CMD_ERR, + + ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS +}; + +struct ath12k_htt_tx_selfgen_ac_sched_status_stats_tlv { + __le32 ac_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ac_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ac_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 ac_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ac_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ac_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 ac_mu_mimo_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ac_mu_mimo_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; +} __packed; + +struct ath12k_htt_tx_selfgen_ax_sched_status_stats_tlv { + __le32 ax_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ax_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ax_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 ax_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ax_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ax_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 ax_mu_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ax_mu_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 ax_mu_bar_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ax_mu_bar_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 ax_basic_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ax_basic_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 ax_ulmumimo_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 ax_ulmumimo_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; +} __packed; + +struct ath12k_htt_tx_selfgen_be_sched_status_stats_tlv { + __le32 be_su_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 
be_su_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 be_su_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 be_mu_mimo_ndpa_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 be_mu_mimo_ndp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 be_mu_mimo_ndp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 be_mu_brp_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 be_mu_brp_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 be_mu_bar_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 be_mu_bar_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 be_basic_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 be_basic_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; + __le32 be_ulmumimo_trig_sch_status[ATH12K_HTT_TX_PDEV_STATS_NUM_TX_ERR_STATUS]; + __le32 be_ulmumimo_trig_sch_flag_err[ATH12K_HTT_TX_SELFGEN_SCH_TSFLAG_ERR_STATS]; +} __packed; + +struct ath12k_htt_stats_string_tlv { + DECLARE_FLEX_ARRAY(__le32, data); +} __packed; + +#define ATH12K_HTT_SRING_STATS_MAC_ID GENMASK(7, 0) +#define ATH12K_HTT_SRING_STATS_RING_ID GENMASK(15, 8) +#define ATH12K_HTT_SRING_STATS_ARENA GENMASK(23, 16) +#define ATH12K_HTT_SRING_STATS_EP BIT(24) +#define ATH12K_HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0) +#define ATH12K_HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16) +#define ATH12K_HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0) +#define ATH12K_HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16) +#define ATH12K_HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0) +#define ATH12K_HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16) +#define ATH12K_HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0) +#define ATH12K_HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16) + +struct ath12k_htt_sring_stats_tlv { + __le32 mac_id__ring_id__arena__ep; + __le32 base_addr_lsb; + __le32 base_addr_msb; + __le32 ring_size; + __le32 elem_size; + __le32 num_avail_words__num_valid_words; + __le32 head_ptr__tail_ptr; + __le32 consumer_empty__producer_full; + __le32 prefetch_count__internal_tail_ptr; +} __packed; + +struct ath12k_htt_sfm_cmn_tlv { + __le32 mac_id__word; + __le32 buf_total; + __le32 mem_empty; + __le32 deallocate_bufs; + __le32 num_records; +} __packed; + +struct ath12k_htt_sfm_client_tlv { + __le32 client_id; + __le32 buf_min; + __le32 buf_max; + __le32 buf_busy; + __le32 buf_alloc; + __le32 buf_avail; + __le32 num_users; +} __packed; + +struct ath12k_htt_sfm_client_user_tlv { + DECLARE_FLEX_ARRAY(__le32, dwords_used_by_user_n); +} __packed; + +struct ath12k_htt_tx_pdev_mu_mimo_sch_stats_tlv { + __le32 mu_mimo_sch_posted; + __le32 mu_mimo_sch_failed; + __le32 mu_mimo_ppdu_posted; + __le32 ac_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS]; + __le32 ax_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS]; + __le32 ax_ofdma_sch_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS]; + __le32 ax_ul_ofdma_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS]; + __le32 ax_ul_ofdma_bsr_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS]; + __le32 ax_ul_ofdma_bar_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS]; + __le32 ax_ul_ofdma_brp_nusers[ATH12K_HTT_TX_NUM_OFDMA_USER_STATS]; + __le32 ax_ul_mumimo_nusers[ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS]; + __le32 ax_ul_mumimo_brp_nusers[ATH12K_HTT_TX_NUM_UL_MUMIMO_USER_STATS]; + __le32 ac_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS]; + __le32 ax_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS]; + __le32 
be_mu_mimo_sch_nusers[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS]; + __le32 be_mu_mimo_per_grp_sz[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS]; + __le32 ac_mu_mimo_grp_sz_ext[ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS]; +} __packed; + +struct ath12k_htt_tx_pdev_mumimo_grp_stats_tlv { + __le32 dl_mumimo_grp_best_grp_size[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ]; + __le32 dl_mumimo_grp_best_num_usrs[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS]; + __le32 dl_mumimo_grp_eligible[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ]; + __le32 dl_mumimo_grp_ineligible[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ]; + __le32 dl_mumimo_grp_invalid[ATH12K_HTT_TX_NUM_MUMIMO_GRP_INVALID_WORDS]; + __le32 dl_mumimo_grp_tputs[ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS]; + __le32 ul_mumimo_grp_best_grp_size[ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ]; + __le32 ul_mumimo_grp_best_usrs[ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS]; + __le32 ul_mumimo_grp_tputs[ATH12K_HTT_STATS_MUMIMO_TPUT_NUM_BINS]; +} __packed; + +enum ath12k_htt_stats_tx_sched_modes { + ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC = 0, + ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX, + ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX, + ATH12K_HTT_STATS_TX_SCHED_MODE_MU_OFDMA_BE, + ATH12K_HTT_STATS_TX_SCHED_MODE_MU_MIMO_BE +}; + +struct ath12k_htt_tx_pdev_mpdu_stats_tlv { + __le32 mpdus_queued_usr; + __le32 mpdus_tried_usr; + __le32 mpdus_failed_usr; + __le32 mpdus_requeued_usr; + __le32 err_no_ba_usr; + __le32 mpdu_underrun_usr; + __le32 ampdu_underrun_usr; + __le32 user_index; + __le32 tx_sched_mode; +} __packed; + +struct ath12k_htt_pdev_stats_cca_counters_tlv { + __le32 tx_frame_usec; + __le32 rx_frame_usec; + __le32 rx_clear_usec; + __le32 my_rx_frame_usec; + __le32 usec_cnt; + __le32 med_rx_idle_usec; + __le32 med_tx_idle_global_usec; + __le32 cca_obss_usec; +} __packed; + +struct ath12k_htt_pdev_cca_stats_hist_v1_tlv { + __le32 chan_num; + __le32 num_records; + __le32 valid_cca_counters_bitmap; + __le32 collection_interval; +} __packed; + +struct ath12k_htt_pdev_obss_pd_stats_tlv { + __le32 num_obss_tx_ppdu_success; + __le32 num_obss_tx_ppdu_failure; + __le32 num_sr_tx_transmissions; + __le32 num_spatial_reuse_opportunities; + __le32 num_non_srg_opportunities; + __le32 num_non_srg_ppdu_tried; + __le32 num_non_srg_ppdu_success; + __le32 num_srg_opportunities; + __le32 num_srg_ppdu_tried; + __le32 num_srg_ppdu_success; + __le32 num_psr_opportunities; + __le32 num_psr_ppdu_tried; + __le32 num_psr_ppdu_success; + __le32 num_non_srg_tried_per_ac[ATH12K_HTT_NUM_AC_WMM]; + __le32 num_non_srg_success_ac[ATH12K_HTT_NUM_AC_WMM]; + __le32 num_srg_tried_per_ac[ATH12K_HTT_NUM_AC_WMM]; + __le32 num_srg_success_per_ac[ATH12K_HTT_NUM_AC_WMM]; + __le32 num_obss_min_dur_check_flush_cnt; + __le32 num_sr_ppdu_abort_flush_cnt; +} __packed; + +struct ath12k_htt_dmac_reset_stats_tlv { + __le32 reset_count; + __le32 reset_time_lo_ms; + __le32 reset_time_hi_ms; + __le32 disengage_time_lo_ms; + __le32 disengage_time_hi_ms; + __le32 engage_time_lo_ms; + __le32 engage_time_hi_ms; + __le32 disengage_count; + __le32 engage_count; + __le32 drain_dest_ring_mask; +} __packed; + +struct ath12k_htt_pdev_sched_algo_ofdma_stats_tlv { + __le32 mac_id__word; + __le32 rate_based_dlofdma_enabled_cnt[ATH12K_HTT_NUM_AC_WMM]; + __le32 rate_based_dlofdma_disabled_cnt[ATH12K_HTT_NUM_AC_WMM]; + __le32 rate_based_dlofdma_probing_cnt[ATH12K_HTT_NUM_AC_WMM]; + __le32 rate_based_dlofdma_monitor_cnt[ATH12K_HTT_NUM_AC_WMM]; + __le32 chan_acc_lat_based_dlofdma_enabled_cnt[ATH12K_HTT_NUM_AC_WMM]; + __le32 chan_acc_lat_based_dlofdma_disabled_cnt[ATH12K_HTT_NUM_AC_WMM]; 
+ __le32 chan_acc_lat_based_dlofdma_monitor_cnt[ATH12K_HTT_NUM_AC_WMM]; + __le32 downgrade_to_dl_su_ru_alloc_fail[ATH12K_HTT_NUM_AC_WMM]; + __le32 candidate_list_single_user_disable_ofdma[ATH12K_HTT_NUM_AC_WMM]; + __le32 dl_cand_list_dropped_high_ul_qos_weight[ATH12K_HTT_NUM_AC_WMM]; + __le32 ax_dlofdma_disabled_due_to_pipelining[ATH12K_HTT_NUM_AC_WMM]; + __le32 dlofdma_disabled_su_only_eligible[ATH12K_HTT_NUM_AC_WMM]; + __le32 dlofdma_disabled_consec_no_mpdus_tried[ATH12K_HTT_NUM_AC_WMM]; + __le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM]; +} __packed; + +enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE { + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52_26, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_106_26, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_242, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_484_242, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996_484_242, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x2_484, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x3_484, + ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_996x4, + ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS, +}; + +#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 +#define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS 16 +#define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS 5 +#define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS 4 +#define ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS 4 + +struct ath12k_htt_tx_pdev_rate_stats_be_ofdma_tlv { + __le32 mac_id__word; + __le32 be_ofdma_tx_ldpc; + __le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS]; + __le32 be_ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + __le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS]; + __le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS]; + __le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS]; + __le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS]; +} __packed; + #endif diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c index 61aa78d8bd8c..c99e9ceb1a6e 100644 --- a/drivers/net/wireless/ath/ath12k/dp.c +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -327,20 +327,22 @@ int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring, } static -u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif) +u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, + struct ath12k_link_vif *arvif) { u32 bank_config = 0; + struct ath12k_vif *ahvif = arvif->ahvif; /* Only valid for raw frames with HW crypto enabled. 
* With SW crypto, mac80211 sets key per packet */ - if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW && + if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW && test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) bank_config |= - u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher), + u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher), HAL_TX_BANK_CONFIG_ENCRYPT_TYPE); - bank_config |= u32_encode_bits(arvif->tx_encap_type, + bank_config |= u32_encode_bits(ahvif->tx_encap_type, HAL_TX_BANK_CONFIG_ENCAP_TYPE); bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) | u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) | @@ -355,7 +357,7 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif HAL_TX_ADDRY_EN), HAL_TX_BANK_CONFIG_ADDRY_EN); - bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0, + bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(ahvif->vif) ? 3 : 0, HAL_TX_BANK_CONFIG_MESH_EN) | u32_encode_bits(arvif->vdev_id_check_en, HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN); @@ -365,7 +367,8 @@ u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif return bank_config; } -static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif, +static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, + struct ath12k_link_vif *arvif, struct ath12k_dp *dp) { int bank_id = DP_INVALID_BANK_ID; @@ -1099,9 +1102,9 @@ int ath12k_dp_htt_connect(struct ath12k_dp *dp) return 0; } -static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif) +static void ath12k_dp_update_vdev_search(struct ath12k_link_vif *arvif) { - switch (arvif->vdev_type) { + switch (arvif->ahvif->vdev_type) { case WMI_VDEV_TYPE_STA: /* TODO: Verify the search type and flags since ast hash * is not part of peer mapv3 @@ -1120,7 +1123,7 @@ static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif) } } -void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif) +void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif) { struct ath12k_base *ab = ar->ab; @@ -1162,7 +1165,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) spin_lock_bh(&dp->rx_desc_lock); for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) { - desc_info = dp->spt_info->rxbaddr[i]; + desc_info = dp->rxbaddr[i]; for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { if (!desc_info[j].in_use) { @@ -1181,11 +1184,11 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) } for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) { - if (!dp->spt_info->rxbaddr[i]) + if (!dp->rxbaddr[i]) continue; - kfree(dp->spt_info->rxbaddr[i]); - dp->spt_info->rxbaddr[i] = NULL; + kfree(dp->rxbaddr[i]); + dp->rxbaddr[i] = NULL; } spin_unlock_bh(&dp->rx_desc_lock); @@ -1202,10 +1205,16 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) if (!skb) continue; - skb_cb = ATH12K_SKB_CB(skb); - ar = skb_cb->ar; - if (atomic_dec_and_test(&ar->dp.num_tx_pending)) - wake_up(&ar->dp.tx_empty_waitq); + /* if we are unregistering, hw would've been destroyed and + * ar is no longer valid. 
+ */ + if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) { + skb_cb = ATH12K_SKB_CB(skb); + ar = skb_cb->ar; + + if (atomic_dec_and_test(&ar->dp.num_tx_pending)) + wake_up(&ar->dp.tx_empty_waitq); + } dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len, DMA_TO_DEVICE); @@ -1220,11 +1229,11 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) { tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; - if (!dp->spt_info->txbaddr[tx_spt_page]) + if (!dp->txbaddr[tx_spt_page]) continue; - kfree(dp->spt_info->txbaddr[tx_spt_page]); - dp->spt_info->txbaddr[tx_spt_page] = NULL; + kfree(dp->txbaddr[tx_spt_page]); + dp->txbaddr[tx_spt_page] = NULL; } spin_unlock_bh(&dp->tx_desc_lock[pool_id]); @@ -1241,6 +1250,7 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab) } kfree(dp->spt_info); + dp->spt_info = NULL; } static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab) @@ -1276,8 +1286,10 @@ void ath12k_dp_free(struct ath12k_base *ab) ath12k_dp_rx_reo_cmd_list_cleanup(ab); - for (i = 0; i < ab->hw_params->max_tx_ring; i++) + for (i = 0; i < ab->hw_params->max_tx_ring; i++) { kfree(dp->tx_ring[i].tx_status); + dp->tx_ring[i].tx_status = NULL; + } ath12k_dp_rx_free(ab); /* Deinit any SOC level resource */ @@ -1415,7 +1427,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i; cookie_ppt_idx = dp->rx_ppt_base + ppt_idx; - dp->spt_info->rxbaddr[i] = &rx_descs[0]; + dp->rxbaddr[i] = &rx_descs[0]; for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j); @@ -1445,7 +1457,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL; ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page; - dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0]; + dp->txbaddr[tx_spt_page] = &tx_descs[0]; for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j); diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index 2fb18b83b3ee..2e05fc19410e 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -16,6 +16,7 @@ struct ath12k_base; struct ath12k_peer; struct ath12k_dp; struct ath12k_vif; +struct ath12k_link_vif; struct hal_tcl_status_ring; struct ath12k_ext_irq_grp; @@ -300,8 +301,6 @@ struct ath12k_tx_desc_info { struct ath12k_spt_info { dma_addr_t paddr; u64 *vaddr; - struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES]; - struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES]; }; struct ath12k_reo_queue_ref { @@ -352,6 +351,8 @@ struct ath12k_dp { struct ath12k_spt_info *spt_info; u32 num_spt_pages; u32 rx_ppt_base; + struct ath12k_rx_desc_info *rxbaddr[ATH12K_NUM_RX_SPT_PAGES]; + struct ath12k_tx_desc_info *txbaddr[ATH12K_NUM_TX_SPT_PAGES]; struct list_head rx_desc_free_list; /* protects the free desc list */ spinlock_t rx_desc_lock; @@ -1799,7 +1800,7 @@ int ath12k_dp_service_srng(struct ath12k_base *ab, struct ath12k_ext_irq_grp *irq_grp, int budget); int ath12k_dp_htt_connect(struct ath12k_dp *dp); -void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif); +void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_link_vif *arvif); void ath12k_dp_free(struct ath12k_base *ab); int ath12k_dp_alloc(struct ath12k_base *ab); void ath12k_dp_cc_config(struct ath12k_base *ab); diff --git 
a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 5c6749bc4039..494984133a91 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -26,15 +26,12 @@ ath12k_dp_mon_rx_populate_byte_count(const struct hal_rx_ppdu_end_user_stats *st void *ppduinfo, struct hal_rx_user_status *rx_user_status) { - u32 mpdu_ok_byte_count = __le32_to_cpu(stats->mpdu_ok_cnt); - u32 mpdu_err_byte_count = __le32_to_cpu(stats->mpdu_err_cnt); - rx_user_status->mpdu_ok_byte_count = - u32_get_bits(mpdu_ok_byte_count, - HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT); + le32_get_bits(stats->info7, + HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT); rx_user_status->mpdu_err_byte_count = - u32_get_bits(mpdu_err_byte_count, - HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT); + le32_get_bits(stats->info8, + HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT); } static void @@ -593,12 +590,20 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, struct hal_rx_ppdu_start *ppdu_start = (struct hal_rx_ppdu_start *)tlv_data; + u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32, + ppdu_start->ppdu_start_ts_31_0); + info[0] = __le32_to_cpu(ppdu_start->info0); - ppdu_info->ppdu_id = - u32_get_bits(info[0], HAL_RX_PPDU_START_INFO0_PPDU_ID); - ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num); - ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts); + ppdu_info->ppdu_id = u32_get_bits(info[0], + HAL_RX_PPDU_START_INFO0_PPDU_ID); + + info[1] = __le32_to_cpu(ppdu_start->info1); + ppdu_info->chan_num = u32_get_bits(info[1], + HAL_RX_PPDU_START_INFO1_CHAN_NUM); + ppdu_info->freq = u32_get_bits(info[1], + HAL_RX_PPDU_START_INFO1_CHAN_FREQ); + ppdu_info->ppdu_ts = ppdu_ts; if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) { ppdu_info->last_ppdu_id = ppdu_info->ppdu_id; @@ -726,33 +731,20 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, case HAL_PHYRX_RSSI_LEGACY: { struct hal_rx_phyrx_rssi_legacy_info *rssi = (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data; - u32 reception_type = 0; - u32 rssi_legacy_info = __le32_to_cpu(rssi->rsvd[0]); info[0] = __le32_to_cpu(rssi->info0); + info[1] = __le32_to_cpu(rssi->info1); /* TODO: Please note that the combined rssi will not be accurate * in MU case. Rssi in MU needs to be retrieved from * PHYRX_OTHER_RECEIVE_INFO TLV. 
*/ ppdu_info->rssi_comb = - u32_get_bits(info[0], - HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB); - reception_type = - u32_get_bits(rssi_legacy_info, - HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION); - - switch (reception_type) { - case HAL_RECEPTION_TYPE_ULOFMDA: - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; - break; - case HAL_RECEPTION_TYPE_ULMIMO: - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; - break; - default: - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; - break; - } + u32_get_bits(info[1], + HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB); + + ppdu_info->bw = u32_get_bits(info[0], + HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW); break; } case HAL_RXPCU_PPDU_END_INFO: { @@ -860,27 +852,29 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, return HAL_RX_MON_STATUS_PPDU_NOT_DONE; } -static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *msdu) +static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, + struct sk_buff *head_msdu, + struct sk_buff *tail_msdu) { u32 rx_pkt_offset, l2_hdr_offset; rx_pkt_offset = ar->ab->hal.hal_desc_sz; - l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab, - (struct hal_rx_desc *)msdu->data); - skb_pull(msdu, rx_pkt_offset + l2_hdr_offset); + l2_hdr_offset = + ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data); + skb_pull(head_msdu, rx_pkt_offset + l2_hdr_offset); } static struct sk_buff * -ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, - u32 mac_id, struct sk_buff *head_msdu, +ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id, + struct sk_buff *head_msdu, struct sk_buff *tail_msdu, struct ieee80211_rx_status *rxs, bool *fcs_err) { struct ath12k_base *ab = ar->ab; - struct sk_buff *msdu, *mpdu_buf, *prev_buf; - struct hal_rx_desc *rx_desc; + struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list; + struct hal_rx_desc *rx_desc, *tail_rx_desc; u8 *hdr_desc, *dest, decap_format; struct ieee80211_hdr_3addr *wh; - u32 err_bitmap; + u32 err_bitmap, frag_list_sum_len = 0; mpdu_buf = NULL; @@ -888,24 +882,30 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, goto err_merge_fail; rx_desc = (struct hal_rx_desc *)head_msdu->data; - err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); + tail_rx_desc = (struct hal_rx_desc *)tail_msdu->data; + err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, tail_rx_desc); if (err_bitmap & HAL_RX_MPDU_ERR_FCS) *fcs_err = true; - decap_format = ath12k_dp_rx_h_decap_type(ab, rx_desc); + decap_format = ath12k_dp_rx_h_decap_type(ab, tail_rx_desc); - ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs); + ath12k_dp_rx_h_ppdu(ar, tail_rx_desc, rxs); if (decap_format == DP_RX_DECAP_TYPE_RAW) { - ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu); + ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu); prev_buf = head_msdu; msdu = head_msdu->next; + head_frag_list = NULL; while (msdu) { - ath12k_dp_mon_rx_msdus_set_payload(ar, msdu); + ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu); + + if (!head_frag_list) + head_frag_list = msdu; + frag_list_sum_len += msdu->len; prev_buf = msdu; msdu = msdu->next; } @@ -913,6 +913,12 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, prev_buf->next = NULL; skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN); + if (head_frag_list) { + skb_shinfo(head_msdu)->frag_list = head_frag_list; + head_msdu->data_len = frag_list_sum_len; + head_msdu->len += head_msdu->data_len; + head_msdu->next = NULL; + } } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) { u8 qos_pkt = 0; @@ -929,7 +935,7 @@ 
ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, msdu = head_msdu; while (msdu) { - ath12k_dp_mon_rx_msdus_set_payload(ar, msdu); + ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu); if (qos_pkt) { dest = skb_push(msdu, sizeof(__le16)); if (!dest) @@ -1135,7 +1141,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct } static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, - struct sk_buff *head_msdu, + struct sk_buff *head_msdu, struct sk_buff *tail_msdu, struct hal_rx_mon_ppdu_info *ppduinfo, struct napi_struct *napi) { @@ -1144,7 +1150,8 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, struct ieee80211_rx_status *rxs = &dp->rx_status; bool fcs_err = false; - mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, head_msdu, + mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, + head_msdu, tail_msdu, rxs, &fcs_err); if (!mon_skb) goto mon_deliver_fail; @@ -1252,7 +1259,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, if (head_msdu && tail_msdu) { ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, - ppdu_info, napi); + tail_msdu, ppdu_info, napi); } kfree(mon_mpdu); @@ -1948,15 +1955,16 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id, struct dp_mon_tx_ppdu_info *tx_ppdu_info) { struct dp_mon_mpdu *tmp, *mon_mpdu; - struct sk_buff *head_msdu; + struct sk_buff *head_msdu, *tail_msdu; list_for_each_entry_safe(mon_mpdu, tmp, &tx_ppdu_info->dp_tx_mon_mpdu_list, list) { list_del(&mon_mpdu->list); head_msdu = mon_mpdu->head; + tail_msdu = mon_mpdu->tail; if (head_msdu) - ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, + ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu, &tx_ppdu_info->rx_status, napi); kfree(mon_mpdu); @@ -2165,7 +2173,7 @@ ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_st } static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar, - struct ath12k_sta *arsta, + struct ath12k_link_sta *arsta, struct hal_rx_mon_ppdu_info *ppdu_info) { struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats; @@ -2321,7 +2329,8 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar, struct hal_rx_mon_ppdu_info *ppdu_info, u32 uid) { - struct ath12k_sta *arsta = NULL; + struct ath12k_sta *ahsta; + struct ath12k_link_sta *arsta; struct ath12k_rx_peer_stats *rx_stats = NULL; struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid]; struct ath12k_peer *peer; @@ -2338,7 +2347,8 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar, return; } - arsta = ath12k_sta_to_arsta(peer->sta); + ahsta = ath12k_sta_to_ahsta(peer->sta); + arsta = &ahsta->deflink; rx_stats = arsta->rx_stats; if (!rx_stats) @@ -2445,7 +2455,8 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, struct dp_srng *mon_dst_ring; struct hal_srng *srng; struct dp_rxdma_mon_ring *buf_ring; - struct ath12k_sta *arsta = NULL; + struct ath12k_sta *ahsta = NULL; + struct ath12k_link_sta *arsta; struct ath12k_peer *peer; u64 cookie; int num_buffs_reaped = 0, srng_id, buf_id; @@ -2514,7 +2525,8 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, } if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) { - arsta = ath12k_sta_to_arsta(peer->sta); + ahsta = ath12k_sta_to_ahsta(peer->sta); + arsta = &ahsta->deflink; ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta, ppdu_info); } else if ((ppdu_info->fc_valid) && diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 91e3393f7b5f..9ae579e50557 100644 --- 
a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -1041,13 +1041,14 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar, struct ieee80211_ampdu_params *params) { struct ath12k_base *ab = ar->ab; - struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta); + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); + struct ath12k_link_sta *arsta = &ahsta->deflink; int vdev_id = arsta->arvif->vdev_id; int ret; ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id, params->tid, params->buf_size, - params->ssn, arsta->pn_type); + params->ssn, arsta->ahsta->pn_type); if (ret) ath12k_warn(ab, "failed to setup rx tid %d\n", ret); @@ -1059,7 +1060,8 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, { struct ath12k_base *ab = ar->ab; struct ath12k_peer *peer; - struct ath12k_sta *arsta = ath12k_sta_to_arsta(params->sta); + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta); + struct ath12k_link_sta *arsta = &ahsta->deflink; int vdev_id = arsta->arvif->vdev_id; bool active; int ret; @@ -1091,7 +1093,7 @@ int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, return ret; } -int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif, +int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif, const u8 *peer_addr, enum set_key_cmd key_cmd, struct ieee80211_key_conf *key) @@ -1313,7 +1315,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar, struct ath12k_base *ab = ar->ab; struct ath12k_peer *peer; struct ieee80211_sta *sta; - struct ath12k_sta *arsta; + struct ath12k_sta *ahsta; + struct ath12k_link_sta *arsta; struct htt_ppdu_stats_user_rate *user_rate; struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats; struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user]; @@ -1394,7 +1397,8 @@ ath12k_update_per_peer_tx_stats(struct ath12k *ar, } sta = peer->sta; - arsta = ath12k_sta_to_arsta(sta); + ahsta = ath12k_sta_to_ahsta(sta); + arsta = &ahsta->deflink; memset(&arsta->txrate, 0, sizeof(arsta->txrate)); diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h index eb1f92559179..bfd4f814553e 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@ -88,7 +88,7 @@ int ath12k_dp_rx_ampdu_start(struct ath12k *ar, struct ieee80211_ampdu_params *params); int ath12k_dp_rx_ampdu_stop(struct ath12k *ar, struct ieee80211_ampdu_params *params); -int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif, +int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif, const u8 *peer_addr, enum set_key_cmd key_cmd, struct ieee80211_key_conf *key); diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 44406e0b4a34..a8d341a6df01 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -10,7 +10,7 @@ #include "hw.h" static enum hal_tcl_encap_type -ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb) +ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ath12k_base *ab = arvif->ar->ab; @@ -216,7 +216,7 @@ out: return ret; } -int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, +int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, struct sk_buff *skb) { struct ath12k_base *ab = ar->ab; @@ -230,6 +230,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, struct sk_buff *skb_ext_desc; struct 
hal_srng *tcl_ring; struct ieee80211_hdr *hdr = (void *)skb->data; + struct ath12k_vif *ahvif = arvif->ahvif; struct dp_tx_ring *tx_ring; u8 pool_id; u8 hal_ring_id; @@ -274,7 +275,7 @@ tcl_ring_sel: ti.bank_id = arvif->bank_id; ti.meta_data_flags = arvif->tcl_metadata; - if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW && + if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW && test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) { if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) { ti.encrypt_type = @@ -376,7 +377,7 @@ map: ti.desc_id = tx_desc->desc_id; ti.data_len = skb->len; skb_cb->paddr = ti.paddr; - skb_cb->vif = arvif->vif; + skb_cb->vif = ahvif->vif; skb_cb->ar = ar; if (msdu_ext_desc) { diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h index 55ff8cc721e3..46dce23501f3 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.h +++ b/drivers/net/wireless/ath/ath12k/dp_tx.h @@ -16,7 +16,7 @@ struct ath12k_dp_htt_wbm_tx_status { }; int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab); -int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, +int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, struct sk_buff *skb); void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id); diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c index ca04bfae8bdc..fd98fac16dd5 100644 --- a/drivers/net/wireless/ath/ath12k/hal.c +++ b/drivers/net/wireless/ath/ath12k/hal.c @@ -385,13 +385,13 @@ static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) { return le32_get_bits(desc->u.qcn9274.msdu_end.info12, - RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP); + RX_MSDU_END_INFO12_MIMO_SS_BITMAP); } static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) { return le16_get_bits(desc->u.qcn9274.msdu_end.info5, - RX_MSDU_END_QCN9274_INFO5_TID); + RX_MSDU_END_INFO5_TID); } static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) @@ -846,13 +846,13 @@ static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_pkt_type(struct hal_rx_desc static u8 ath12k_hw_qcn9274_compact_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) { return le32_get_bits(desc->u.qcn9274_compact.msdu_end.info12, - RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP); + RX_MSDU_END_INFO12_MIMO_SS_BITMAP); } static u8 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) { return le16_get_bits(desc->u.qcn9274_compact.msdu_end.info5, - RX_MSDU_END_QCN9274_INFO5_TID); + RX_MSDU_END_INFO5_TID); } static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc) @@ -1198,7 +1198,7 @@ static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc) static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc) { return le32_get_bits(desc->u.wcn7850.msdu_end.info12, - RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP); + RX_MSDU_END_INFO12_MIMO_SS_BITMAP); } static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc) @@ -1216,7 +1216,7 @@ static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc, struct hal_rx_desc *ldesc) { memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end, - sizeof(struct rx_msdu_end_wcn7850)); + sizeof(struct rx_msdu_end_qcn9274)); } static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc) diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h 
b/drivers/net/wireless/ath/ath12k/hal_rx.h index 095216eabc01..2de7b0eba9f2 100644 --- a/drivers/net/wireless/ath/ath12k/hal_rx.h +++ b/drivers/net/wireless/ath/ath12k/hal_rx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_HAL_RX_H @@ -156,6 +156,7 @@ struct hal_rx_mon_ppdu_info { u32 preamble_type; u32 mpdu_len; u16 chan_num; + u16 freq; u16 tcp_msdu_count; u16 tcp_ack_msdu_count; u16 udp_msdu_count; @@ -232,21 +233,25 @@ struct hal_rx_mon_ppdu_info { u8 medium_prot_type; }; -#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0) +#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0) +#define HAL_RX_PPDU_START_INFO1_CHAN_NUM GENMASK(15, 0) +#define HAL_RX_PPDU_START_INFO1_CHAN_FREQ GENMASK(31, 16) struct hal_rx_ppdu_start { __le32 info0; - __le32 chan_num; - __le32 ppdu_start_ts; + __le32 info1; + __le32 ppdu_start_ts_31_0; + __le32 ppdu_start_ts_63_32; + __le32 rsvd[2]; } __packed; -#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16) +#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(26, 16) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11) -#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(10, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(11) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(12) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(13) +#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(24, 21) #define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0) #define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16) @@ -262,8 +267,8 @@ struct hal_rx_ppdu_start { #define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0) #define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16) -#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT GENMASK(24, 0) -#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT GENMASK(24, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO7_MPDU_OK_BYTE_COUNT GENMASK(24, 0) +#define HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0) struct hal_rx_ppdu_end_user_stats { __le32 rsvd0[2]; @@ -278,9 +283,9 @@ struct hal_rx_ppdu_end_user_stats { __le32 usr_resp_ref; __le32 info6; __le32 rsvd3[4]; - __le32 mpdu_ok_cnt; + __le32 info7; __le32 rsvd4; - __le32 mpdu_err_cnt; + __le32 info8; __le32 rsvd5[2]; __le32 usr_resp_ref_ext; __le32 rsvd6; @@ -436,23 +441,27 @@ enum hal_rx_ul_reception_type { HAL_RECEPTION_TYPE_FRAMELESS }; -#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8) -#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION GENMASK(3, 0) +#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RECEPTION GENMASK(3, 0) +#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW GENMASK(7, 5) +#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO1_RSSI_COMB GENMASK(15, 8) struct hal_rx_phyrx_rssi_legacy_info { - __le32 rsvd[35]; __le32 info0; + __le32 rsvd0[39]; + __le32 info1; + __le32 rsvd1; } __packed; -#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16) -#define 
HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16) -#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0) +#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16) +#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16) +#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0) struct hal_rx_mpdu_start { + __le32 rsvd0[9]; __le32 info0; __le32 info1; - __le32 rsvd1[11]; + __le32 rsvd1[2]; __le32 info2; - __le32 rsvd2[9]; + __le32 rsvd2[16]; } __packed; #define HAL_RX_PPDU_END_DURATION GENMASK(23, 0) diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h index 0e53ec269fa4..e8840fab6061 100644 --- a/drivers/net/wireless/ath/ath12k/hif.h +++ b/drivers/net/wireless/ath/ath12k/hif.h @@ -31,6 +31,7 @@ struct ath12k_hif_ops { void (*ce_irq_disable)(struct ath12k_base *ab); void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx); int (*panic_handler)(struct ath12k_base *ab); + void (*coredump_download)(struct ath12k_base *ab); }; static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, @@ -156,4 +157,9 @@ static inline int ath12k_hif_panic_handler(struct ath12k_base *ab) return ab->hif.ops->panic_handler(ab); } +static inline void ath12k_hif_coredump_download(struct ath12k_base *ab) +{ + if (ab->hif.ops->coredump_download) + ab->hif.ops->coredump_download(ab); +} #endif /* ATH12K_HIF_H */ diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index ec1bda95e555..b7b583fadb5a 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -913,7 +913,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .rfkill_cfg = 0, .rfkill_on_level = 0, - .rddm_size = 0, + .rddm_size = 0x600000, .def_num_link = 0, .max_mlo_peer = 256, @@ -1069,7 +1069,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .rfkill_cfg = 0, .rfkill_on_level = 0, - .rddm_size = 0, + .rddm_size = 0x600000, .def_num_link = 0, .max_mlo_peer = 256, diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index acf5628adda5..d493ec812055 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -250,10 +250,10 @@ static const u32 ath12k_smps_map[] = { }; static int ath12k_start_vdev_delay(struct ath12k *ar, - struct ath12k_vif *arvif); + struct ath12k_link_vif *arvif); static void ath12k_mac_stop(struct ath12k *ar); -static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif); -static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif); +static int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif); +static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arvif); static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode) { @@ -476,18 +476,25 @@ static u8 ath12k_parse_mpdudensity(u8 mpdudensity) } } -static int ath12k_mac_vif_chan(struct ieee80211_vif *vif, - struct cfg80211_chan_def *def) +static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id, + struct cfg80211_chan_def *def) { + struct ieee80211_bss_conf *link_conf; struct ieee80211_chanctx_conf *conf; rcu_read_lock(); - conf = rcu_dereference(vif->bss_conf.chanctx_conf); + link_conf = rcu_dereference(vif->link_conf[link_id]); + + if (!link_conf) { + rcu_read_unlock(); + return -ENOLINK; + } + + conf = rcu_dereference(link_conf->chanctx_conf); if (!conf) { rcu_read_unlock(); return -ENOENT; } - *def = conf->def; rcu_read_unlock(); @@ 
-539,18 +546,33 @@ static void ath12k_get_arvif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ath12k_vif_iter *arvif_iter = data; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + unsigned long links_map = ahvif->links_map; + struct ath12k_link_vif *arvif; + u8 link_id; + + for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = rcu_dereference(ahvif->link[link_id]); + + if (WARN_ON(!arvif)) + continue; - if (arvif->vdev_id == arvif_iter->vdev_id && - arvif->ar == arvif_iter->ar) - arvif_iter->arvif = arvif; + if (arvif->vdev_id == arvif_iter->vdev_id && + arvif->ar == arvif_iter->ar) { + arvif_iter->arvif = arvif; + break; + } + } } -struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id) +struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id) { struct ath12k_vif_iter arvif_iter = {}; u32 flags; + /* To use the arvif returned, caller must have held rcu read lock. + */ + WARN_ON(!rcu_read_lock_any_held()); arvif_iter.vdev_id = vdev_id; arvif_iter.ar = ar; @@ -567,12 +589,12 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id) return arvif_iter.arvif; } -struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, - u32 vdev_id) +struct ath12k_link_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, + u32 vdev_id) { int i; struct ath12k_pdev *pdev; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; for (i = 0; i < ab->num_radios; i++) { pdev = rcu_dereference(ab->pdevs_active[i]); @@ -658,7 +680,8 @@ static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw, static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif = &ahvif->deflink; struct ath12k_hw *ah = ath12k_hw_to_ah(hw); /* If there is one pdev within ah, then we return @@ -673,11 +696,12 @@ static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, return NULL; } -static struct ath12k_vif *ath12k_mac_get_vif_up(struct ath12k *ar) +static struct ath12k_link_vif *ath12k_mac_get_vif_up(struct ath12k *ar) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->is_up) return arvif; @@ -705,17 +729,17 @@ static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BA return false; } -static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_vif *arvif) +static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_link_vif *arvif) { struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; - struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_vif *vif = arvif->ahvif->vif; struct cfg80211_chan_def def; enum nl80211_band band; u8 pdev_id = ab->fw_pdev[0].pdev_id; int i; - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return pdev_id; band = def.chan->band; @@ -730,7 +754,7 @@ static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_vif *arvif) u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; struct ath12k_base *ab = ar->ab; if (!ab->hw_params->single_pdev_only) @@ -770,11 +794,11 @@ static void ath12k_pdev_caps_update(struct 
ath12k *ar) static int ath12k_mac_txpower_recalc(struct ath12k *ar) { struct ath12k_pdev *pdev = ar->pdev; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret, txpower = -1; u32 param; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); list_for_each_entry(arvif, &ar->arvifs, list) { if (arvif->txpower <= 0) @@ -824,13 +848,13 @@ fail: return ret; } -static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif) +static int ath12k_recalc_rtscts_prot(struct ath12k_link_vif *arvif) { struct ath12k *ar = arvif->ar; u32 vdev_param, rts_cts; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS; @@ -863,7 +887,7 @@ static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif) return ret; } -static int ath12k_mac_set_kickout(struct ath12k_vif *arvif) +static int ath12k_mac_set_kickout(struct ath12k_link_vif *arvif) { struct ath12k *ar = arvif->ar; u32 param; @@ -913,11 +937,14 @@ void ath12k_mac_peer_cleanup_all(struct ath12k *ar) struct ath12k_peer *peer, *tmp; struct ath12k_base *ab = ar->ab; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); spin_lock_bh(&ab->base_lock); list_for_each_entry_safe(peer, tmp, &ab->peers, list) { - ath12k_dp_rx_peer_tid_cleanup(ar, peer); + /* Skip Rx TID cleanup for self peer */ + if (peer->sta) + ath12k_dp_rx_peer_tid_cleanup(ar, peer); + list_del(&peer->list); kfree(peer); } @@ -929,7 +956,7 @@ void ath12k_mac_peer_cleanup_all(struct ath12k *ar) static int ath12k_mac_vdev_setup_sync(struct ath12k *ar) { - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) return -ESHUTDOWN; @@ -971,7 +998,7 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id, struct ath12k_wmi_vdev_up_params params = {}; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); channel = chandef->chan; arg.vdev_id = vdev_id; @@ -1034,7 +1061,7 @@ static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar) { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); reinit_completion(&ar->vdev_setup_done); @@ -1064,9 +1091,8 @@ static int ath12k_mac_monitor_vdev_create(struct ath12k *ar) struct ath12k_wmi_vdev_create_arg arg = {}; int bit, ret; u8 tmp_addr[6]; - u16 nss; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (ar->monitor_vdev_created) return 0; @@ -1104,19 +1130,6 @@ static int ath12k_mac_monitor_vdev_create(struct ath12k *ar) return ret; } - nss = hweight32(ar->cfg_tx_chainmask) ? 
: 1; - ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id, - WMI_VDEV_PARAM_NSS, nss); - if (ret) { - ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n", - ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret); - return ret; - } - - ret = ath12k_mac_txpower_recalc(ar); - if (ret) - return ret; - ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id; ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); ar->num_created_vdevs++; @@ -1132,7 +1145,7 @@ static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar) int ret; unsigned long time_left; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (!ar->monitor_vdev_created) return 0; @@ -1178,7 +1191,7 @@ static int ath12k_mac_monitor_start(struct ath12k *ar) struct cfg80211_chan_def *chandef = NULL; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (ar->monitor_started) return 0; @@ -1208,7 +1221,7 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar) { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (!ar->monitor_started) return 0; @@ -1226,12 +1239,13 @@ static int ath12k_mac_monitor_stop(struct ath12k *ar) return ret; } -static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif) +static int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif) { + struct ath12k_vif *ahvif = arvif->ahvif; struct ath12k *ar = arvif->ar; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); reinit_completion(&ar->vdev_setup_done); @@ -1253,7 +1267,7 @@ static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif) ar->num_started_vdevs--; ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n", - arvif->vif->addr, arvif->vdev_id); + ahvif->vif->addr, arvif->vdev_id); if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); @@ -1272,36 +1286,33 @@ static int ath12k_mac_config(struct ath12k *ar, u32 changed) struct ieee80211_conf *conf = &hw->conf; int ret = 0; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR; if (ar->monitor_conf_enabled) { if (ar->monitor_vdev_created) - goto exit; + return ret; ret = ath12k_mac_monitor_vdev_create(ar); if (ret) - goto exit; + return ret; ret = ath12k_mac_monitor_start(ar); if (ret) goto err_mon_del; } else { if (!ar->monitor_vdev_created) - goto exit; + return ret; ret = ath12k_mac_monitor_stop(ar); if (ret) - goto exit; + return ret; ath12k_mac_monitor_vdev_delete(ar); } } -exit: - mutex_unlock(&ar->conf_mutex); return ret; err_mon_del: ath12k_mac_monitor_vdev_delete(ar); - mutex_unlock(&ar->conf_mutex); return ret; } @@ -1311,6 +1322,8 @@ static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed) struct ath12k *ar; int ret; + lockdep_assert_wiphy(hw->wiphy); + ar = ath12k_ah_to_ar(ah, 0); ret = ath12k_mac_config(ar, changed); @@ -1321,7 +1334,7 @@ static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed) return ret; } -static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_vif *arvif, +static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_link_vif *arvif, struct sk_buff *bcn) { struct ath12k *ar = arvif->ar; @@ -1378,7 +1391,7 @@ static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, return 0; } -static void ath12k_mac_set_arvif_ies(struct ath12k_vif *arvif, struct sk_buff *bcn, 
+static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_buff *bcn, u8 bssid_index, bool *nontx_profile_found) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)bcn->data; @@ -1470,19 +1483,22 @@ static void ath12k_mac_set_arvif_ies(struct ath12k_vif *arvif, struct sk_buff *b } } -static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_vif *arvif) +static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif) { - struct ieee80211_bss_conf *bss_conf = &arvif->vif->bss_conf; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_bss_conf *bss_conf = &ahvif->vif->bss_conf; struct ath12k_wmi_bcn_tmpl_ema_arg ema_args; struct ieee80211_ema_beacons *beacons; - struct ath12k_vif *tx_arvif; + struct ath12k_link_vif *tx_arvif; bool nontx_profile_found = false; + struct ath12k_vif *tx_ahvif; int ret = 0; u8 i; - tx_arvif = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif); + tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif); + tx_arvif = &tx_ahvif->deflink; beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar), - tx_arvif->vif, 0); + tx_ahvif->vif, 0); if (!beacons || !beacons->cnt) { ath12k_warn(arvif->ar->ab, "failed to get ema beacon templates from mac80211\n"); @@ -1520,22 +1536,25 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_vif *arvif) return ret; } -static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif) +static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif) { - struct ath12k_vif *tx_arvif = arvif; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ath12k_link_vif *tx_arvif = arvif; struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; - struct ieee80211_vif *vif = arvif->vif; struct ieee80211_mutable_offsets offs = {}; + struct ath12k_vif *tx_ahvif = ahvif; bool nontx_profile_found = false; struct sk_buff *bcn; int ret; - if (arvif->vdev_type != WMI_VDEV_TYPE_AP) + if (ahvif->vdev_type != WMI_VDEV_TYPE_AP) return 0; if (vif->mbssid_tx_vif) { - tx_arvif = ath12k_vif_to_arvif(vif->mbssid_tx_vif); + tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif); + tx_arvif = &tx_ahvif->deflink; if (tx_arvif != arvif && arvif->is_up) return 0; @@ -1543,7 +1562,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif) return ath12k_mac_setup_bcn_tmpl_ema(arvif); } - bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_arvif->vif, + bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif, &offs, 0); if (!bcn) { ath12k_warn(ab, "failed to get beacon template from mac80211\n"); @@ -1554,14 +1573,14 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif) ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL); } else { ath12k_mac_set_arvif_ies(arvif, bcn, - arvif->vif->bss_conf.bssid_index, + ahvif->vif->bss_conf.bssid_index, &nontx_profile_found); if (!nontx_profile_found) ath12k_warn(ab, "nontransmitted profile not found in beacon template\n"); } - if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) { + if (ahvif->vif->type == NL80211_IFTYPE_AP && ahvif->vif->p2p) { ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn); if (ret) { ath12k_warn(ab, "failed to setup P2P GO bcn ie: %d\n", @@ -1595,14 +1614,15 @@ free_bcn_skb: return ret; } -static void ath12k_control_beaconing(struct ath12k_vif *arvif, +static void ath12k_control_beaconing(struct ath12k_link_vif *arvif, struct ieee80211_bss_conf *info) { struct ath12k_wmi_vdev_up_params params = {}; + struct ath12k_vif *ahvif = 
arvif->ahvif; struct ath12k *ar = arvif->ar; int ret; - lockdep_assert_held(&arvif->ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(arvif->ar)->wiphy); if (!info->enable_beacon) { ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); @@ -1622,15 +1642,19 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif, return; } - arvif->aid = 0; + ahvif->aid = 0; ether_addr_copy(arvif->bssid, info->bssid); params.vdev_id = arvif->vdev_id; - params.aid = arvif->aid; + params.aid = ahvif->aid; params.bssid = arvif->bssid; - if (arvif->vif->mbssid_tx_vif) { - params.tx_bssid = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif)->bssid; + if (ahvif->vif->mbssid_tx_vif) { + struct ath12k_vif *tx_ahvif = + ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif); + struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink; + + params.tx_bssid = tx_arvif->bssid; params.nontx_profile_idx = info->bssid_index; params.nontx_profile_cnt = 1 << info->bssid_indicator; } @@ -1651,7 +1675,8 @@ static void ath12k_mac_handle_beacon_iter(void *data, u8 *mac, { struct sk_buff *skb = data; struct ieee80211_mgmt *mgmt = (void *)skb->data; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif = &ahvif->deflink; if (vif->type != NL80211_IFTYPE_STATION) return; @@ -1674,7 +1699,8 @@ static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { u32 *vdev_id = data; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif = &ahvif->deflink; struct ath12k *ar = arvif->ar; struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); @@ -1705,9 +1731,9 @@ void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id) static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work) { - struct ath12k_vif *arvif = container_of(work, struct ath12k_vif, - connection_loss_work.work); - struct ieee80211_vif *vif = arvif->vif; + struct ath12k_link_vif *arvif = container_of(work, struct ath12k_link_vif, + connection_loss_work.work); + struct ieee80211_vif *vif = arvif->ahvif->vif; if (!arvif->is_up) return; @@ -1716,15 +1742,16 @@ static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work) } static void ath12k_peer_assoc_h_basic(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); u32 aid; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); if (vif->type == NL80211_IFTYPE_STATION) aid = vif->cfg.aid; @@ -1742,21 +1769,22 @@ static void ath12k_peer_assoc_h_basic(struct ath12k *ar, } static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_chan_def def; struct cfg80211_bss *bss; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); const u8 
*rsnie = NULL; const u8 *wpaie = NULL; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0, @@ -1804,11 +1832,12 @@ static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, } static void ath12k_peer_assoc_h_rates(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; struct cfg80211_chan_def def; const struct ieee80211_supported_band *sband; @@ -1819,9 +1848,9 @@ static void ath12k_peer_assoc_h_rates(struct ath12k *ar, u8 rate; int i; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; band = def.chan->band; @@ -1867,12 +1896,13 @@ ath12k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask) } static void ath12k_peer_assoc_h_ht(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; @@ -1880,9 +1910,9 @@ static void ath12k_peer_assoc_h_ht(struct ath12k *ar, u8 max_nss; u32 stbc; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; if (!ht_cap->ht_supported) @@ -2028,12 +2058,13 @@ ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set, } static void ath12k_peer_assoc_h_vht(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u16 *vht_mcs_mask; @@ -2042,7 +2073,9 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, u8 max_nss, vht_mcs; int i; - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; if (!vht_cap->vht_supported) @@ -2123,10 +2156,12 @@ static void ath12k_peer_assoc_h_vht(struct ath12k *ar, } static void ath12k_peer_assoc_h_he(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = 
ath12k_ahsta_to_sta(arsta->ahsta); const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; int i; u8 ampdu_factor, max_nss; @@ -2278,16 +2313,18 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, } static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; struct cfg80211_chan_def def; enum nl80211_band band; u8 ampdu_factor, mpdu_density; - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; band = def.chan->band; @@ -2347,9 +2384,10 @@ static int ath12k_get_smps_from_capa(const struct ieee80211_sta_ht_cap *ht_cap, return 0; } -static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta, +static void ath12k_peer_assoc_h_smps(struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa; const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; int smps; @@ -2376,13 +2414,13 @@ static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta, } static void ath12k_peer_assoc_h_qos(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); - switch (arvif->vdev_type) { + switch (arvif->ahvif->vdev_type) { case WMI_VDEV_TYPE_AP: if (sta->wme) { /* TODO: Check WME vs QoS */ @@ -2412,15 +2450,16 @@ static void ath12k_peer_assoc_h_qos(struct ath12k *ar, } static int ath12k_peer_assoc_qos_ap(struct ath12k *ar, - struct ath12k_vif *arvif, - struct ieee80211_sta *sta) + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) { + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k_wmi_ap_ps_arg arg; u32 max_sp; u32 uapsd; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); arg.vdev_id = arvif->vdev_id; @@ -2574,18 +2613,22 @@ static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar, } static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct cfg80211_chan_def def; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; enum wmi_phy_mode phymode = MODE_UNKNOWN; - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); + + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; band = def.chan->band; @@ -2719,17 +2762,19 @@ static void ath12k_mac_set_eht_ppe_threshold(const u8 *ppe_thres, } static void ath12k_peer_assoc_h_eht(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct 
ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg) { + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20; const struct ieee80211_eht_mcs_nss_supp_bw *bw; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); u32 *rx_mcs, *tx_mcs; + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht) return; @@ -2802,34 +2847,34 @@ static void ath12k_peer_assoc_h_eht(struct ath12k *ar, } static void ath12k_peer_assoc_prepare(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ath12k_wmi_peer_assoc_arg *arg, bool reassoc) { - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); memset(arg, 0, sizeof(*arg)); reinit_completion(&ar->peer_assoc_done); arg->peer_new_assoc = !reassoc; - ath12k_peer_assoc_h_basic(ar, vif, sta, arg); - ath12k_peer_assoc_h_crypto(ar, vif, sta, arg); - ath12k_peer_assoc_h_rates(ar, vif, sta, arg); - ath12k_peer_assoc_h_ht(ar, vif, sta, arg); - ath12k_peer_assoc_h_vht(ar, vif, sta, arg); - ath12k_peer_assoc_h_he(ar, vif, sta, arg); - ath12k_peer_assoc_h_he_6ghz(ar, vif, sta, arg); - ath12k_peer_assoc_h_eht(ar, vif, sta, arg); - ath12k_peer_assoc_h_qos(ar, vif, sta, arg); - ath12k_peer_assoc_h_phymode(ar, vif, sta, arg); - ath12k_peer_assoc_h_smps(sta, arg); + ath12k_peer_assoc_h_basic(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_crypto(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_rates(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_ht(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_vht(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_he(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_he_6ghz(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_eht(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_qos(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_phymode(ar, arvif, arsta, arg); + ath12k_peer_assoc_h_smps(arsta, arg); /* TODO: amsdu_disable req? 
*/ } -static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif, +static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_link_vif *arvif, const u8 *addr, const struct ieee80211_sta_ht_cap *ht_cap, const struct ieee80211_he_6ghz_capa *he_6ghz_capa) @@ -2849,21 +2894,24 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif, } static void ath12k_bss_assoc(struct ath12k *ar, - struct ath12k_vif *arvif, + struct ath12k_link_vif *arvif, struct ieee80211_bss_conf *bss_conf) { - struct ieee80211_vif *vif = arvif->vif; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ath12k_wmi_vdev_up_params params = {}; struct ath12k_wmi_peer_assoc_arg peer_arg; + struct ath12k_link_sta *arsta; struct ieee80211_sta *ap_sta; + struct ath12k_sta *ahsta; struct ath12k_peer *peer; bool is_auth = false; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", - arvif->vdev_id, arvif->bssid, arvif->aid); + arvif->vdev_id, arvif->bssid, ahvif->aid); rcu_read_lock(); @@ -2875,7 +2923,15 @@ static void ath12k_bss_assoc(struct ath12k *ar, return; } - ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false); + ahsta = ath12k_sta_to_ahsta(ap_sta); + arsta = &ahsta->deflink; + + if (WARN_ON(!arsta)) { + rcu_read_unlock(); + return; + } + + ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, false); rcu_read_unlock(); @@ -2903,11 +2959,11 @@ static void ath12k_bss_assoc(struct ath12k *ar, WARN_ON(arvif->is_up); - arvif->aid = vif->cfg.aid; + ahvif->aid = vif->cfg.aid; ether_addr_copy(arvif->bssid, bss_conf->bssid); params.vdev_id = arvif->vdev_id; - params.aid = arvif->aid; + params.aid = ahvif->aid; params.bssid = arvif->bssid; ret = ath12k_wmi_vdev_up(ar, ¶ms); if (ret) { @@ -2949,11 +3005,11 @@ static void ath12k_bss_assoc(struct ath12k *ar, } static void ath12k_bss_disassoc(struct ath12k *ar, - struct ath12k_vif *arvif) + struct ath12k_link_vif *arvif) { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", arvif->vdev_id, arvif->bssid); @@ -2996,10 +3052,10 @@ static u32 ath12k_mac_get_rate_hw_value(int bitrate) } static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, - struct ieee80211_vif *vif, + struct ath12k_link_vif *arvif, struct cfg80211_chan_def *def) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); const struct ieee80211_supported_band *sband; u8 basic_rate_idx; @@ -3008,7 +3064,7 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, u16 bitrate; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); sband = hw->wiphy->bands[def->chan->band]; basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; @@ -3033,9 +3089,19 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); } -static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, +static int +ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *ol[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + return 0; +} + +static int ath12k_mac_fils_discovery(struct ath12k_link_vif *arvif, struct 
ieee80211_bss_conf *info) { + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); struct ath12k *ar = arvif->ar; struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); struct sk_buff *tmpl; @@ -3046,7 +3112,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, if (info->fils_discovery.max_interval) { interval = info->fils_discovery.max_interval; - tmpl = ieee80211_get_fils_discovery_tmpl(hw, arvif->vif); + tmpl = ieee80211_get_fils_discovery_tmpl(hw, vif); if (tmpl) ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id, tmpl); @@ -3054,8 +3120,7 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, unsol_bcast_probe_resp_enabled = 1; interval = info->unsol_bcast_probe_resp_interval; - tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, - arvif->vif); + tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif); if (tmpl) ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id, tmpl); @@ -3080,10 +3145,44 @@ static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, return ret; } -static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif) +static void ath12k_mac_op_vif_cfg_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u64 changed) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + unsigned long links = ahvif->links_map; + struct ath12k_link_vif *arvif; + struct ath12k *ar; + u8 link_id; + + lockdep_assert_wiphy(hw->wiphy); + + if (changed & BSS_CHANGED_SSID && vif->type == NL80211_IFTYPE_AP) { + ahvif->u.ap.ssid_len = vif->cfg.ssid_len; + if (vif->cfg.ssid_len) + memcpy(ahvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); + } + + if (changed & BSS_CHANGED_ASSOC) { + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!arvif || !arvif->ar) + continue; + + ar = arvif->ar; + + if (vif->cfg.assoc) + ath12k_bss_assoc(ar, arvif, &vif->bss_conf); + else + ath12k_bss_disassoc(ar, arvif); + } + } +} + +static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif) { struct ath12k *ar = arvif->ar; - struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_vif *vif = arvif->ahvif->vif; struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf; enum wmi_sta_powersave_param param; enum wmi_sta_ps_mode psmode; @@ -3091,12 +3190,12 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif) int timeout; bool enable_ps; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (vif->type != NL80211_IFTYPE_STATION) return; - enable_ps = arvif->ps; + enable_ps = arvif->ahvif->ps; if (enable_ps) { psmode = WMI_STA_PS_MODE_ENABLED; param = WMI_STA_PS_PARAM_INACTIVITY_TIME; @@ -3128,11 +3227,12 @@ static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif) } static void ath12k_mac_bss_info_changed(struct ath12k *ar, - struct ath12k_vif *arvif, + struct ath12k_link_vif *arvif, struct ieee80211_bss_conf *info, u64 changed) { - struct ieee80211_vif *vif = arvif->vif; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ieee80211_vif_cfg *vif_cfg = &vif->cfg; struct cfg80211_chan_def def; u32 param_id, param_value; @@ -3146,7 +3246,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, u8 rateidx; u32 rate; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (changed & BSS_CHANGED_BEACON_INT) { arvif->beacon_interval = info->beacon_int; @@ -3202,10 +3302,10 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, if 
(changed & BSS_CHANGED_SSID && vif->type == NL80211_IFTYPE_AP) { - arvif->u.ap.ssid_len = vif->cfg.ssid_len; + ahvif->u.ap.ssid_len = vif->cfg.ssid_len; if (vif->cfg.ssid_len) - memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); - arvif->u.ap.hidden_ssid = info->hidden_ssid; + memcpy(ahvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); + ahvif->u.ap.hidden_ssid = info->hidden_ssid; } if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) @@ -3316,7 +3416,7 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, } if (changed & BSS_CHANGED_MCAST_RATE && - !ath12k_mac_vif_chan(arvif->vif, &def)) { + !ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) { band = def.chan->band; mcast_rate = vif->bss_conf.mcast_rate[band]; @@ -3360,8 +3460,8 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, } if (changed & BSS_CHANGED_BASIC_RATES && - !ath12k_mac_vif_chan(arvif->vif, &def)) - ath12k_recalculate_mgmt_rate(ar, vif, &def); + !ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) + ath12k_recalculate_mgmt_rate(ar, arvif, &def); if (changed & BSS_CHANGED_TWT) { if (info->twt_requester || info->twt_responder) @@ -3406,53 +3506,177 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar, if (changed & BSS_CHANGED_PS && ar->ab->hw_params->supports_sta_ps) { - arvif->ps = vif_cfg->ps; + ahvif->ps = vif_cfg->ps; ath12k_mac_vif_setup_ps(arvif); } } -static struct ath12k_vif_cache *ath12k_arvif_get_cache(struct ath12k_vif *arvif) +static struct ath12k_vif_cache *ath12k_ahvif_get_link_cache(struct ath12k_vif *ahvif, + u8 link_id) +{ + if (!ahvif->cache[link_id]) { + ahvif->cache[link_id] = kzalloc(sizeof(*ahvif->cache[0]), GFP_KERNEL); + if (ahvif->cache[link_id]) + INIT_LIST_HEAD(&ahvif->cache[link_id]->key_conf.list); + } + + return ahvif->cache[link_id]; +} + +static void ath12k_ahvif_put_link_key_cache(struct ath12k_vif_cache *cache) { - if (!arvif->cache) - arvif->cache = kzalloc(sizeof(*arvif->cache), GFP_KERNEL); + struct ath12k_key_conf *key_conf, *tmp; - return arvif->cache; + if (!cache || list_empty(&cache->key_conf.list)) + return; + list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) { + list_del(&key_conf->list); + kfree(key_conf); + } } -static void ath12k_arvif_put_cache(struct ath12k_vif *arvif) +static void ath12k_ahvif_put_link_cache(struct ath12k_vif *ahvif, u8 link_id) { - kfree(arvif->cache); - arvif->cache = NULL; + ath12k_ahvif_put_link_key_cache(ahvif->cache[link_id]); + kfree(ahvif->cache[link_id]); + ahvif->cache[link_id] = NULL; } -static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, - u64 changed) +static void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u64 changed) { struct ath12k *ar; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); struct ath12k_vif_cache *cache; + struct ath12k_link_vif *arvif; + u8 link_id = info->link_id; - ar = ath12k_get_ar_by_vif(hw, vif); + lockdep_assert_wiphy(hw->wiphy); + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); /* if the vdev is not created on a certain radio, * cache the info to be updated later on vdev creation */ - if (!ar) { - cache = ath12k_arvif_get_cache(arvif); + if (!arvif || !arvif->is_created) { + cache = ath12k_ahvif_get_link_cache(ahvif, link_id); if (!cache) return; - arvif->cache->bss_conf_changed |= changed; + + cache->bss_conf_changed 
|= changed; + return; } - mutex_lock(&ar->conf_mutex); + ar = arvif->ar; ath12k_mac_bss_info_changed(ar, arvif, info, changed); +} + +static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah, + struct ieee80211_vif *vif, + u8 link_id) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + int i; + + lockdep_assert_wiphy(ah->hw->wiphy); + + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); + if (arvif) + return arvif; + + if (!vif->valid_links) { + /* Use deflink for Non-ML VIFs and mark the link id as 0 + */ + link_id = 0; + arvif = &ahvif->deflink; + } else { + /* If this is the first link arvif being created for an ML VIF + * use the preallocated deflink memory + */ + if (!ahvif->links_map) { + arvif = &ahvif->deflink; + } else { + arvif = (struct ath12k_link_vif *) + kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL); + if (!arvif) + return NULL; + } + } + + arvif->ahvif = ahvif; + arvif->link_id = link_id; + ahvif->links_map |= BIT(link_id); + + INIT_LIST_HEAD(&arvif->list); + INIT_DELAYED_WORK(&arvif->connection_loss_work, + ath12k_mac_vif_sta_connection_loss_work); + + for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { + arvif->bitrate_mask.control[i].legacy = 0xffffffff; + memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].ht_mcs)); + memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + } + + /* Allocate Default Queue now and reassign during actual vdev create */ + vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE; + for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) + vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; - mutex_unlock(&ar->conf_mutex); + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; + + rcu_assign_pointer(ahvif->link[arvif->link_id], arvif); + ahvif->links_map |= BIT(link_id); + synchronize_rcu(); + return arvif; +} + +static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k_hw *ah = ahvif->ah; + + lockdep_assert_wiphy(ah->hw->wiphy); + + rcu_assign_pointer(ahvif->link[arvif->link_id], NULL); + synchronize_rcu(); + ahvif->links_map &= ~BIT(arvif->link_id); + + if (arvif != &ahvif->deflink) + kfree(arvif); + else + memset(arvif, 0, sizeof(*arvif)); +} + +static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw, + struct ath12k_link_vif *arvif) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k_hw *ah = hw->priv; + struct ath12k *ar = arvif->ar; + int ret; + + lockdep_assert_wiphy(ah->hw->wiphy); + + cancel_delayed_work_sync(&arvif->connection_loss_work); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)", + arvif->vdev_id, arvif->link_id); + + if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { + ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid); + if (ret) + ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d", + arvif->vdev_id, arvif->link_id, ret); + } + ath12k_mac_vdev_delete(ar, arvif); } static struct ath12k* @@ -3541,7 +3765,7 @@ static int ath12k_scan_stop(struct ath12k *ar) }; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); /* TODO: Fill other STOP Params */ arg.pdev_id = ar->pdev->pdev_id; @@ -3581,7 +3805,7 @@ static void ath12k_scan_abort(struct ath12k *ar) { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); 
spin_lock_bh(&ar->data_lock); @@ -3616,9 +3840,9 @@ static void ath12k_scan_timeout_work(struct work_struct *work) struct ath12k *ar = container_of(work, struct ath12k, scan.timeout.work); - mutex_lock(&ar->conf_mutex); + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); ath12k_scan_abort(ar); - mutex_unlock(&ar->conf_mutex); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); } static int ath12k_start_scan(struct ath12k *ar, @@ -3626,7 +3850,7 @@ { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ret = ath12k_wmi_send_scan_start_cmd(ar, arg); if (ret) @@ -3655,24 +3879,50 @@ return 0; } +static u8 +ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar) +{ + struct ath12k_link_vif *arvif; + struct ath12k_hw *ah = ahvif->ah; + unsigned long links = ahvif->links_map; + u8 link_id; + + lockdep_assert_wiphy(ah->hw->wiphy); + + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); + + if (!arvif || !arvif->is_created) + continue; + + if (ar == arvif->ar) + return link_id; + } + + /* input ar is not assigned to any of the links, use link id + * 0 for scan vdev creation. + */ + return 0; +} + static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); - struct ath12k *ar, *prev_ar; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k *ar; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; struct cfg80211_scan_request *req = &hw_req->req; struct ath12k_wmi_scan_req_arg *arg = NULL; + u8 link_id; int ret; int i; bool create = true; - if (ah->num_radio == 1) { - WARN_ON(!arvif->is_created); - ar = ath12k_ah_to_ar(ah, 0); - goto scan; - } + lockdep_assert_wiphy(hw->wiphy); + + arvif = &ahvif->deflink; /* Since the targeted scan device could depend on the frequency * requested in the hw_req, select the corresponding radio @@ -3681,6 +3931,13 @@ if (!ar) return -EINVAL; + /* check if any of the links of ML VIF is already started on + * radio(ar) corresponding to given scan frequency and use it, + * if not use deflink(link 0) for scan purpose. + */ + link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar); + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + /* If the vif is already assigned to a specific vdev of an ar, * check whether its already started, vdev which is started * are not allowed to switch to a new radio. @@ -3698,31 +3955,24 @@ return -EINVAL; if (ar != arvif->ar) { - /* backup the previously used ar ptr, since the vdev delete - * would assign the arvif->ar to NULL after the call - */ - prev_ar = arvif->ar; - mutex_lock(&prev_ar->conf_mutex); - ret = ath12k_mac_vdev_delete(prev_ar, vif); - mutex_unlock(&prev_ar->conf_mutex); - if (ret) - ath12k_warn(prev_ar->ab, - "unable to delete scan vdev %d\n", ret); + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); } else { create = false; } } if (create) { - mutex_lock(&ar->conf_mutex); - ret = ath12k_mac_vdev_create(ar, vif); - mutex_unlock(&ar->conf_mutex); + /* Previous arvif would've been cleared in radio switch block * above, assign arvif again for create. 
+ */ + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + + ret = ath12k_mac_vdev_create(ar, arvif); if (ret) { ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret); return -EINVAL; } } -scan: - mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { @@ -3805,30 +4055,31 @@ exit: kfree(arg); } - mutex_unlock(&ar->conf_mutex); - return ret; } static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; struct ath12k *ar; + lockdep_assert_wiphy(hw->wiphy); + + arvif = &ahvif->deflink; + if (!arvif->is_created) return; ar = arvif->ar; - mutex_lock(&ar->conf_mutex); ath12k_scan_abort(ar); - mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); } -static int ath12k_install_key(struct ath12k_vif *arvif, +static int ath12k_install_key(struct ath12k_link_vif *arvif, struct ieee80211_key_conf *key, enum set_key_cmd cmd, const u8 *macaddr, u32 flags) @@ -3843,8 +4094,10 @@ static int ath12k_install_key(struct ath12k_vif *arvif, .key_flags = flags, .macaddr = macaddr, }; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); - lockdep_assert_held(&arvif->ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); reinit_completion(&ar->install_key_done); @@ -3895,13 +4148,13 @@ install: if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ)) return -ETIMEDOUT; - if (ether_addr_equal(macaddr, arvif->vif->addr)) - arvif->key_cipher = key->cipher; + if (ether_addr_equal(macaddr, vif->addr)) + ahvif->key_cipher = key->cipher; return ar->install_key_status ? -EINVAL : 0; } -static int ath12k_clear_peer_keys(struct ath12k_vif *arvif, +static int ath12k_clear_peer_keys(struct ath12k_link_vif *arvif, const u8 *addr) { struct ath12k *ar = arvif->ar; @@ -3912,7 +4165,7 @@ static int ath12k_clear_peer_keys(struct ath12k_vif *arvif, int i; u32 flags = 0; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); spin_lock_bh(&ab->base_lock); peer = ath12k_peer_find(ab, arvif->vdev_id, addr); @@ -3944,25 +4197,31 @@ static int ath12k_clear_peer_keys(struct ath12k_vif *arvif, } static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, struct ieee80211_key_conf *key) { + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ieee80211_sta *sta = NULL; struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_peer *peer; - struct ath12k_sta *arsta; + struct ath12k_sta *ahsta; const u8 *peer_addr; - int ret = 0; + int ret; u32 flags = 0; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + if (arsta) + sta = ath12k_ahsta_to_sta(arsta->ahsta); if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) return 1; if (sta) peer_addr = sta->addr; - else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) + else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) peer_addr = vif->bss_conf.bssid; else peer_addr = vif->addr; @@ -3970,7 +4229,7 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, key->hw_key_idx = key->keyidx; /* the peer should not disappear in mid-way (unless FW goes awry) since - * we already hold 
conf_mutex. we just make sure its there now. + * we already hold wiphy lock. we just make sure its there now. */ spin_lock_bh(&ab->base_lock); peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); @@ -3980,14 +4239,13 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, if (cmd == SET_KEY) { ath12k_warn(ab, "cannot install key for non-existent peer %pM\n", peer_addr); - ret = -EOPNOTSUPP; - goto exit; - } else { - /* if the peer doesn't exist there is no key to disable - * anymore - */ - goto exit; + return -EOPNOTSUPP; } + + /* if the peer doesn't exist there is no key to disable + * anymore + */ + return 0; } if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) @@ -3998,13 +4256,13 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags); if (ret) { ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret); - goto exit; + return ret; } ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key); if (ret) { ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret); - goto exit; + return ret; } spin_lock_bh(&ab->base_lock); @@ -4029,7 +4287,7 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr); if (sta) { - arsta = ath12k_sta_to_arsta(sta); + ahsta = ath12k_sta_to_ahsta(sta); switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: @@ -4038,61 +4296,138 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: if (cmd == SET_KEY) - arsta->pn_type = HAL_PN_TYPE_WPA; + ahsta->pn_type = HAL_PN_TYPE_WPA; else - arsta->pn_type = HAL_PN_TYPE_NONE; + ahsta->pn_type = HAL_PN_TYPE_NONE; break; default: - arsta->pn_type = HAL_PN_TYPE_NONE; + ahsta->pn_type = HAL_PN_TYPE_NONE; break; } } spin_unlock_bh(&ab->base_lock); -exit: - return ret; + return 0; +} + +static int ath12k_mac_update_key_cache(struct ath12k_vif_cache *cache, + enum set_key_cmd cmd, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct ath12k_key_conf *key_conf = NULL, *tmp; + + if (cmd == SET_KEY) { + key_conf = kzalloc(sizeof(*key_conf), GFP_KERNEL); + + if (!key_conf) + return -ENOMEM; + + key_conf->cmd = cmd; + key_conf->sta = sta; + key_conf->key = key; + list_add_tail(&key_conf->list, + &cache->key_conf.list); + } + if (list_empty(&cache->key_conf.list)) + return 0; + list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) { + if (key_conf->key == key) { + /* DEL key for an old SET key which driver hasn't flushed yet. 
+ */ + list_del(&key_conf->list); + kfree(key_conf); + break; + } + } + return 0; } static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + struct ath12k_link_sta *arsta = NULL; struct ath12k_vif_cache *cache; - struct ath12k *ar; + struct ath12k_sta *ahsta; + unsigned long links; + u8 link_id; int ret; + lockdep_assert_wiphy(hw->wiphy); + /* BIP needs to be done in software */ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || - key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) + key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) { return 1; + } if (key->keyidx > WMI_MAX_KEY_INDEX) return -ENOSPC; - ar = ath12k_get_ar_by_vif(hw, vif); - if (!ar) { - /* ar is expected to be valid when sta ptr is available */ - if (sta) { - WARN_ON_ONCE(1); - return -EINVAL; + if (sta) { + ahsta = ath12k_sta_to_ahsta(sta); + /* For an ML STA Pairwise key is same for all associated link Stations, + * hence do set key for all link STAs which are active. + */ + if (sta->mlo) { + links = ahsta->links_map; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, + ahvif->link[link_id]); + arsta = wiphy_dereference(hw->wiphy, + ahsta->link[link_id]); + + if (WARN_ON(!arvif || !arsta)) + /* arvif and arsta are expected to be valid when + * STA is present. + */ + continue; + + ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, + arsta, key); + if (ret) + break; + } + } else { + arsta = &ahsta->deflink; + arvif = arsta->arvif; + if (WARN_ON(!arvif)) { + ret = -EINVAL; + goto out; + } + + ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, arsta, key); + } + } else { + if (key->link_id >= 0 && key->link_id < IEEE80211_MLD_MAX_NUM_LINKS) { + link_id = key->link_id; + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + } else { + link_id = 0; + arvif = &ahvif->deflink; } - cache = ath12k_arvif_get_cache(arvif); - if (!cache) - return -ENOSPC; - cache->key_conf.cmd = cmd; - cache->key_conf.key = key; - cache->key_conf.changed = true; - return 0; + if (!arvif || !arvif->is_created) { + cache = ath12k_ahvif_get_link_cache(ahvif, link_id); + if (!cache) + return -ENOSPC; + + ret = ath12k_mac_update_key_cache(cache, cmd, sta, key); + + return ret; + } + + ret = ath12k_mac_set_key(arvif->ar, cmd, arvif, NULL, key); } - mutex_lock(&ar->conf_mutex); - ret = ath12k_mac_set_key(ar, cmd, vif, sta, key); - mutex_unlock(&ar->conf_mutex); +out: + return ret; } @@ -4111,17 +4446,18 @@ ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar, } static int -ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif, - struct ieee80211_sta *sta, +ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, const struct cfg80211_bitrate_mask *mask, enum nl80211_band band) { + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k *ar = arvif->ar; u8 vht_rate, nss; u32 rate_code; int ret, i; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); nss = 0; @@ -4157,11 +4493,12 @@ ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif, } static int ath12k_station_assoc(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + 
struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta, bool reassoc) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k_wmi_peer_assoc_arg peer_arg; int ret; struct cfg80211_chan_def def; @@ -4169,15 +4506,15 @@ static int ath12k_station_assoc(struct ath12k *ar, struct cfg80211_bitrate_mask *mask; u8 num_vht_rates; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return -EPERM; band = def.chan->band; mask = &arvif->bitrate_mask; - ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); + ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, reassoc); if (peer_arg.peer_nss < 1) { ath12k_warn(ar->ab, @@ -4205,7 +4542,7 @@ static int ath12k_station_assoc(struct ath12k *ar, * Note that all other rates and NSS will be disabled for this peer. */ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { - ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, + ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, band); if (ret) return ret; @@ -4234,7 +4571,7 @@ static int ath12k_station_assoc(struct ath12k *ar, } if (sta->wme && sta->uapsd_queues) { - ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta); + ret = ath12k_peer_assoc_qos_ap(ar, arvif, arsta); if (ret) { ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n", sta->addr, arvif->vdev_id, ret); @@ -4246,13 +4583,13 @@ static int ath12k_station_assoc(struct ath12k *ar, } static int ath12k_station_disassoc(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (!sta->wme) { arvif->num_legacy_stations--; @@ -4270,11 +4607,10 @@ static int ath12k_station_disassoc(struct ath12k *ar, return 0; } -static void ath12k_sta_rc_update_wk(struct work_struct *wk) +static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk) { struct ath12k *ar; - struct ath12k_vif *arvif; - struct ath12k_sta *arsta; + struct ath12k_link_vif *arvif; struct ieee80211_sta *sta; struct cfg80211_chan_def def; enum nl80211_band band; @@ -4285,13 +4621,18 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) const struct cfg80211_bitrate_mask *mask; struct ath12k_wmi_peer_assoc_arg peer_arg; enum wmi_phy_mode peer_phymode; + struct ath12k_link_sta *arsta; + struct ieee80211_vif *vif; - arsta = container_of(wk, struct ath12k_sta, update_wk); - sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); + lockdep_assert_wiphy(wiphy); + + arsta = container_of(wk, struct ath12k_link_sta, update_wk); + sta = ath12k_ahsta_to_sta(arsta->ahsta); arvif = arsta->arvif; + vif = ath12k_ahvif_to_vif(arvif->ahvif); ar = arvif->ar; - if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def))) + if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def))) return; band = def.chan->band; @@ -4310,14 +4651,12 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) spin_unlock_bh(&ar->data_lock); - mutex_lock(&ar->conf_mutex); - nss = max_t(u32, 1, nss); nss = min(nss, 
max(ath12k_mac_max_ht_nss(ht_mcs_mask), ath12k_mac_max_vht_nss(vht_mcs_mask))); if (changed & IEEE80211_RC_BW_CHANGED) { - ath12k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg); + ath12k_peer_assoc_h_phymode(ar, arvif, arsta, &peer_arg); peer_phymode = peer_arg.peer_phymode; if (bw > bw_prev) { @@ -4334,7 +4673,7 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) if (err) { ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n", sta->addr, peer_phymode, err); - goto err_rc_bw_changed; + return; } err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_CHWIDTH, @@ -4355,7 +4694,7 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) if (err) { ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n", sta->addr, bw, err); - goto err_rc_bw_changed; + return; } err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_PHYMODE, @@ -4405,14 +4744,14 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) * across HT/VHT and for multiple VHT MCS support. */ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { - ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, + ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, band); } else { /* If the peer is non-VHT or no fixed VHT rate * is provided in the new bitrate mask we set the * other rates using peer_assoc command. */ - ath12k_peer_assoc_prepare(ar, arvif->vif, sta, + ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, true); err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); @@ -4425,18 +4764,17 @@ static void ath12k_sta_rc_update_wk(struct work_struct *wk) sta->addr, arvif->vdev_id); } } -err_rc_bw_changed: - mutex_unlock(&ar->conf_mutex); } -static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif, - struct ieee80211_sta *sta) +static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) { + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k *ar = arvif->ar; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) + if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) return 0; if (ar->num_stations >= ar->max_num_stations) @@ -4447,38 +4785,38 @@ static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif, return 0; } -static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif, - struct ieee80211_sta *sta) +static void ath12k_mac_dec_num_stations(struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) { + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k *ar = arvif->ar; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) + if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) return; ar->num_stations--; } static int ath12k_mac_station_add(struct ath12k *ar, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ath12k_link_vif *arvif, + struct ath12k_link_sta *arsta) { struct ath12k_base *ab = ar->ab; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); + struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k_wmi_peer_create_arg peer_param; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - ret = 
ath12k_mac_inc_num_stations(arvif, sta); + ret = ath12k_mac_inc_num_stations(arvif, arsta); if (ret) { ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n", ar->max_num_stations); goto exit; } - arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL); if (!arsta->rx_stats) { ret = -ENOMEM; @@ -4519,7 +4857,7 @@ static int ath12k_mac_station_add(struct ath12k *ar, if (ab->hw_params->vdev_start_delay && !arvif->is_started && - arvif->vdev_type != WMI_VDEV_TYPE_AP) { + arvif->ahvif->vdev_type != WMI_VDEV_TYPE_AP) { ret = ath12k_start_vdev_delay(ar, arvif); if (ret) { ath12k_warn(ab, "failed to delay vdev start: %d\n", ret); @@ -4532,7 +4870,7 @@ static int ath12k_mac_station_add(struct ath12k *ar, free_peer: ath12k_peer_delete(ar, arvif->vdev_id, sta->addr); dec_num_station: - ath12k_mac_dec_num_stations(arvif, sta); + ath12k_mac_dec_num_stations(arvif, arsta); exit: return ret; } @@ -4574,16 +4912,18 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state) { + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); struct ath12k *ar; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); + struct ath12k_link_vif *arvif; + struct ath12k_link_sta *arsta; struct ath12k_peer *peer; int ret = 0; - /* cancel must be done outside the mutex to avoid deadlock */ - if ((old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST)) - cancel_work_sync(&arsta->update_wk); + lockdep_assert_wiphy(hw->wiphy); + + arvif = &ahvif->deflink; + arsta = &ahsta->deflink; ar = ath12k_get_ar_by_vif(hw, vif); if (!ar) { @@ -4591,21 +4931,28 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, return -EINVAL; } - mutex_lock(&ar->conf_mutex); - if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { memset(arsta, 0, sizeof(*arsta)); + rcu_assign_pointer(ahsta->link[0], arsta); + /* TODO use appropriate link id once MLO support is added */ + arsta->link_id = ATH12K_DEFAULT_LINK_ID; + ahsta->links_map = BIT(arsta->link_id); + arsta->ahsta = ahsta; arsta->arvif = arvif; - INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk); + wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk); + + synchronize_rcu(); - ret = ath12k_mac_station_add(ar, vif, sta); + ret = ath12k_mac_station_add(ar, arvif, arsta); if (ret) ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); } else if ((old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST)) { - if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { + wiphy_work_cancel(hw->wiphy, &arsta->update_wk); + + if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) { ath12k_bss_disassoc(ar, arvif); ret = ath12k_mac_vdev_stop(arvif); if (ret) @@ -4622,7 +4969,7 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n", sta->addr, arvif->vdev_id); - ath12k_mac_dec_num_stations(arvif, sta); + ath12k_mac_dec_num_stations(arvif, arsta); spin_lock_bh(&ar->ab->base_lock); peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (peer && peer->sta == sta) { @@ -4637,12 +4984,20 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, kfree(arsta->rx_stats); arsta->rx_stats = NULL; + + if (arsta->link_id < IEEE80211_MLD_MAX_NUM_LINKS) { + rcu_assign_pointer(ahsta->link[arsta->link_id], NULL); + synchronize_rcu(); + 
ahsta->links_map &= ~(BIT(arsta->link_id)); + arsta->link_id = ATH12K_INVALID_LINK_ID; + arsta->ahsta = NULL; + } } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC && (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_ADHOC)) { - ret = ath12k_station_assoc(ar, vif, sta, false); + ret = ath12k_station_assoc(ar, arvif, arsta, false); if (ret) ath12k_warn(ar->ab, "Failed to associate station: %pM\n", sta->addr); @@ -4686,14 +5041,12 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT || vif->type == NL80211_IFTYPE_ADHOC)) { - ret = ath12k_station_disassoc(ar, vif, sta); + ret = ath12k_station_disassoc(ar, arvif, arsta); if (ret) ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n", sta->addr); } - mutex_unlock(&ar->conf_mutex); - return ret; } @@ -4703,25 +5056,32 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; int ret; s16 txpwr; + lockdep_assert_wiphy(hw->wiphy); + + arvif = &ahvif->deflink; + if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { txpwr = 0; } else { txpwr = sta->deflink.txpwr.power; - if (!txpwr) - return -EINVAL; + if (!txpwr) { + ret = -EINVAL; + goto out; + } } - if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) - return -EINVAL; + if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) { + ret = -EINVAL; + goto out; + } ar = ath12k_ah_to_ar(ah, 0); - mutex_lock(&ar->conf_mutex); - ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, WMI_PEER_USE_FIXED_PWR, txpwr); if (ret) { @@ -4731,7 +5091,6 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, } out: - mutex_unlock(&ar->conf_mutex); return ret; } @@ -4742,10 +5101,16 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, { struct ieee80211_sta *sta = link_sta->sta; struct ath12k *ar; - struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_sta *arsta; + struct ath12k_link_vif *arvif; struct ath12k_peer *peer; u32 bw, smps; + /* TODO: use proper link id once link sta specific rc update support is + * available in mac80211. 
+ */ + u8 link_id = ATH12K_DEFAULT_LINK_ID; ar = ath12k_get_ar_by_vif(hw, vif); if (!ar) { @@ -4753,11 +5118,27 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, return; } + rcu_read_lock(); + arvif = rcu_dereference(ahvif->link[link_id]); + if (!arvif) { + ath12k_warn(ar->ab, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n", + link_id, sta->addr); + rcu_read_unlock(); + return; + } + arsta = rcu_dereference(ahsta->link[link_id]); + if (!arsta) { + rcu_read_unlock(); + ath12k_warn(ar->ab, "mac sta rc update failed to fetch link sta on link id %u for peer %pM\n", + link_id, sta->addr); + return; + } spin_lock_bh(&ar->ab->base_lock); peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); if (!peer) { spin_unlock_bh(&ar->ab->base_lock); + rcu_read_unlock(); ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n", sta->addr, arvif->vdev_id); return; @@ -4809,17 +5190,20 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, spin_unlock_bh(&ar->data_lock); - ieee80211_queue_work(hw, &arsta->update_wk); + wiphy_work_queue(hw->wiphy, &arsta->update_wk); + + rcu_read_unlock(); } -static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif, +static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif, u16 ac, bool enable) { struct ath12k *ar = arvif->ar; + struct ath12k_vif *ahvif = arvif->ahvif; u32 value; int ret; - if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + if (ahvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; switch (ac) { @@ -4842,19 +5226,19 @@ static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif, } if (enable) - arvif->u.sta.uapsd |= value; + ahvif->u.sta.uapsd |= value; else - arvif->u.sta.uapsd &= ~value; + ahvif->u.sta.uapsd &= ~value; ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, WMI_STA_PS_PARAM_UAPSD, - arvif->u.sta.uapsd); + ahvif->u.sta.uapsd); if (ret) { ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret); goto exit; } - if (arvif->u.sta.uapsd) + if (ahvif->u.sta.uapsd) value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; else value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; @@ -4869,8 +5253,7 @@ exit: return ret; } -static int ath12k_mac_conf_tx(struct ath12k_vif *arvif, - unsigned int link_id, u16 ac, +static int ath12k_mac_conf_tx(struct ath12k_link_vif *arvif, u16 ac, const struct ieee80211_tx_queue_params *params) { struct wmi_wmm_params_arg *p = NULL; @@ -4878,7 +5261,7 @@ static int ath12k_mac_conf_tx(struct ath12k_vif *arvif, struct ath12k_base *ab = ar->ab; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); switch (ac) { case IEEE80211_AC_VO: @@ -4927,26 +5310,30 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params) { - struct ath12k *ar; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_vif_cache *cache = arvif->cache; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + struct ath12k_vif_cache *cache; int ret; - ar = ath12k_get_ar_by_vif(hw, vif); - if (!ar) { - /* cache the info and apply after vdev is created */ - cache = ath12k_arvif_get_cache(arvif); + lockdep_assert_wiphy(hw->wiphy); + + if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) + return -EINVAL; + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!arvif || !arvif->is_created) { + cache = ath12k_ahvif_get_link_cache(ahvif, link_id); if (!cache) return -ENOSPC; + cache->tx_conf.changed = true; cache->tx_conf.ac = ac; 
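In the conf_tx hunk just above, parameters that arrive before the per-link vdev exists are no longer applied immediately: they are stashed in a per-link cache (ath12k_ahvif_get_link_cache()) and replayed later from the cache-flush path once the vdev has been created. A small stand-alone sketch of that cache-then-flush idea, with made-up field names, is:

/* Sketch of the cache-then-flush pattern from the conf_tx hunk above:
 * when the per-link vdev does not exist yet, the parameters are stashed
 * and applied later from the cache-flush path.  Names are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

struct tx_conf_cache {
        bool changed;
        unsigned int ac;
        int cwmin, cwmax;
};

struct link_vif {
        bool is_created;
        struct tx_conf_cache cache;
};

/* stand-in for the WMI call that programs WMM parameters into firmware */
static int apply_tx_conf(unsigned int ac, int cwmin, int cwmax)
{
        printf("apply ac=%u cwmin=%d cwmax=%d\n", ac, cwmin, cwmax);
        return 0;
}

static int conf_tx(struct link_vif *arvif, unsigned int ac, int cwmin, int cwmax)
{
        if (!arvif->is_created) {
                arvif->cache.changed = true;   /* remember for later */
                arvif->cache.ac = ac;
                arvif->cache.cwmin = cwmin;
                arvif->cache.cwmax = cwmax;
                return 0;
        }
        return apply_tx_conf(ac, cwmin, cwmax);
}

static void cache_flush(struct link_vif *arvif)
{
        if (arvif->cache.changed) {
                apply_tx_conf(arvif->cache.ac, arvif->cache.cwmin,
                              arvif->cache.cwmax);
                arvif->cache.changed = false;
        }
}

int main(void)
{
        struct link_vif arvif = { 0 };

        conf_tx(&arvif, 2, 15, 1023);   /* vdev not created yet: cached */
        arvif.is_created = true;        /* e.g. after assign_chanctx    */
        cache_flush(&arvif);            /* deferred parameters applied  */
        return 0;
}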
cache->tx_conf.tx_queue_params = *params; + return 0; } - mutex_lock(&ar->conf_mutex); - ret = ath12k_mac_conf_tx(arvif, link_id, ac, params); - mutex_unlock(&ar->conf_mutex); + ret = ath12k_mac_conf_tx(arvif, ac, params); return ret; } @@ -5017,10 +5404,11 @@ ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask return ht_cap; } -static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif) +static int ath12k_mac_set_txbf_conf(struct ath12k_link_vif *arvif) { u32 value = 0; struct ath12k *ar = arvif->ar; + struct ath12k_vif *ahvif = arvif->ahvif; int nsts; int sound_dim; u32 vht_cap = ar->pdev->cap.vht_cap; @@ -5048,7 +5436,7 @@ static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif) value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) && - arvif->vdev_type == WMI_VDEV_TYPE_AP) + ahvif->vdev_type == WMI_VDEV_TYPE_AP) value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; } @@ -5056,7 +5444,7 @@ static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif) value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) && - arvif->vdev_type == WMI_VDEV_TYPE_STA) + ahvif->vdev_type == WMI_VDEV_TYPE_STA) value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; } @@ -5613,7 +6001,7 @@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) struct ath12k_hw *ah = ath12k_ar_to_ah(ar); int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (ath12k_check_chain_mask(ar, tx_ant, true)) return -EINVAL; @@ -5717,7 +6105,7 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) return 0; } -static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, +static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arvif, struct sk_buff *skb) { struct ath12k_base *ab = ar->ab; @@ -5785,7 +6173,8 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work) { struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work); struct ath12k_skb_cb *skb_cb; - struct ath12k_vif *arvif; + struct ath12k_vif *ahvif; + struct ath12k_link_vif *arvif; struct sk_buff *skb; int ret; @@ -5797,8 +6186,8 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work) continue; } - arvif = ath12k_vif_to_arvif(skb_cb->vif); - + ahvif = ath12k_vif_to_ahvif(skb_cb->vif); + arvif = &ahvif->deflink; if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) { ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb); if (ret) { @@ -5853,18 +6242,18 @@ static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar, struct sk_buff *skb, bool is_prb_rsp) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); if (likely(!is_prb_rsp)) return; spin_lock_bh(&ar->data_lock); - if (arvif->u.ap.noa_data && - !pskb_expand_head(skb, 0, arvif->u.ap.noa_len, + if (ahvif->u.ap.noa_data && + !pskb_expand_head(skb, 0, ahvif->u.ap.noa_len, GFP_ATOMIC)) - skb_put_data(skb, arvif->u.ap.noa_data, - arvif->u.ap.noa_len); + skb_put_data(skb, ahvif->u.ap.noa_data, + ahvif->u.ap.noa_len); spin_unlock_bh(&ar->data_lock); } @@ -5876,7 +6265,8 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw, struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif = 
&ahvif->deflink; struct ath12k *ar = arvif->ar; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_key_conf *key = info->control.hw_key; @@ -5940,8 +6330,7 @@ static int ath12k_mac_start(struct ath12k *ar) int ret; lockdep_assert_held(&ah->hw_mutex); - - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1, pdev->pdev_id); @@ -6026,14 +6415,11 @@ static int ath12k_mac_start(struct ath12k *ar) } } - mutex_unlock(&ar->conf_mutex); - rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], &ab->pdevs[ar->pdev_idx]); return 0; err: - mutex_unlock(&ar->conf_mutex); return ret; } @@ -6053,6 +6439,8 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw) struct ath12k *ar; int ret, i; + lockdep_assert_wiphy(hw->wiphy); + ath12k_drain_tx(ah); guard(mutex)(&ah->hw_mutex); @@ -6159,15 +6547,14 @@ static void ath12k_mac_stop(struct ath12k *ar) int ret; lockdep_assert_held(&ah->hw_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - mutex_lock(&ar->conf_mutex); ret = ath12k_mac_config_mon_status_default(ar, false); if (ret && (ret != -EOPNOTSUPP)) ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n", ret); clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); - mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); cancel_work_sync(&ar->regd_update_work); @@ -6193,6 +6580,8 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) struct ath12k *ar; int i; + lockdep_assert_wiphy(hw->wiphy); + ath12k_drain_tx(ah); mutex_lock(&ah->hw_mutex); @@ -6206,7 +6595,7 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) } static u8 -ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif) +ath12k_mac_get_vdev_stats_id(struct ath12k_link_vif *arvif) { struct ath12k_base *ab = arvif->ar->ab; u8 vdev_stats_id = 0; @@ -6228,19 +6617,22 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif) return vdev_stats_id; } -static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_vif *arvif, +static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif, u32 *flags, u32 *tx_vdev_id) { - struct ieee80211_vif *tx_vif = arvif->vif->mbssid_tx_vif; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif; struct ath12k *ar = arvif->ar; - struct ath12k_vif *tx_arvif; + struct ath12k_link_vif *tx_arvif; + struct ath12k_vif *tx_ahvif; if (!tx_vif) return 0; - tx_arvif = ath12k_vif_to_arvif(tx_vif); + tx_ahvif = ath12k_vif_to_ahvif(tx_vif); + tx_arvif = &tx_ahvif->deflink; - if (arvif->vif->bss_conf.nontransmitted) { + if (ahvif->vif->bss_conf.nontransmitted) { if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) return -EINVAL; @@ -6252,22 +6644,23 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_vif *arvif, return -EINVAL; } - if (arvif->vif->bss_conf.ema_ap) + if (ahvif->vif->bss_conf.ema_ap) *flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE; return 0; } -static int ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif, +static int ath12k_mac_setup_vdev_create_arg(struct ath12k_link_vif *arvif, struct ath12k_wmi_vdev_create_arg *arg) { struct ath12k *ar = arvif->ar; struct ath12k_pdev *pdev = ar->pdev; + struct ath12k_vif *ahvif = arvif->ahvif; int ret; arg->if_id = arvif->vdev_id; - arg->type = arvif->vdev_type; - arg->subtype = arvif->vdev_subtype; + arg->type = ahvif->vdev_type; + arg->subtype = ahvif->vdev_subtype; arg->pdev_id = pdev->pdev_id; 
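These vdev-create hunks show the split that runs through the whole file: MLD-wide state such as vdev_type/vdev_subtype, the u.ap/u.sta unions and tx_encap_type now live in ath12k_vif (ahvif), while anything tied to a firmware vdev, such as vdev_id, the link BSSID and the owning radio, stays in ath12k_link_vif (arvif), with link 0 embedded as deflink. Roughly, and with illustrative field names only:

/* Rough model of the vif split visible throughout these hunks: MLD-wide
 * state lives in one structure, per-link (per-vdev) state in another,
 * with link 0 embedded as "deflink".  Field names are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_LINKS 15

struct vif;

struct link_vif {                 /* ~ ath12k_link_vif ("arvif")          */
        uint8_t vdev_id;          /* one firmware vdev per link           */
        uint8_t bssid[6];         /* link address, may differ from MLD    */
        void *ar;                 /* the radio this link is mapped to     */
        struct vif *ahvif;        /* back-pointer to the MLD-level vif    */
};

struct vif {                      /* ~ ath12k_vif ("ahvif")               */
        int vdev_type;            /* STA/AP/... is a property of the vif  */
        int vdev_subtype;
        int tx_encap_type;
        struct link_vif deflink;  /* non-MLO interfaces only use this     */
        struct link_vif *link[MAX_LINKS];
};

int main(void)
{
        struct vif ahvif = { .vdev_type = 1 /* e.g. STA */ };
        struct link_vif *arvif = &ahvif.deflink;

        arvif->ahvif = &ahvif;
        arvif->vdev_id = 0;
        printf("vif type %d, link vdev %d\n",
               arvif->ahvif->vdev_type, arvif->vdev_id);
        return 0;
}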
arg->mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP; @@ -6334,14 +6727,15 @@ ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype) } static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar, - struct ath12k_vif *arvif) + struct ath12k_link_vif *arvif) { u32 param_id, param_value; struct ath12k_base *ab = ar->ab; + struct ath12k_vif *ahvif = arvif->ahvif; int ret; param_id = WMI_VDEV_PARAM_SET_HEMU_MODE; - param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type); + param_value = ath12k_mac_prepare_he_mode(ar->pdev, ahvif->vif->type); ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param_id, param_value); if (ret) { @@ -6364,9 +6758,10 @@ static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar, return ret; } -static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif) +static void ath12k_mac_update_vif_offload(struct ath12k_link_vif *arvif) { - struct ieee80211_vif *vif = arvif->vif; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; u32 param_id, param_value; @@ -6379,14 +6774,14 @@ static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif) IEEE80211_OFFLOAD_DECAP_ENABLED); if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) - arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET; + ahvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET; else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) - arvif->tx_encap_type = ATH12K_HW_TXRX_RAW; + ahvif->tx_encap_type = ATH12K_HW_TXRX_RAW; else - arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI; + ahvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI; ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, - param_id, arvif->tx_encap_type); + param_id, ahvif->tx_encap_type); if (ret) { ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n", arvif->vdev_id, ret); @@ -6413,57 +6808,86 @@ static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif) static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + unsigned long links; + int link_id; - ath12k_mac_update_vif_offload(arvif); + lockdep_assert_wiphy(hw->wiphy); + + if (vif->valid_links) { + links = vif->valid_links; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!(arvif && arvif->ar)) + continue; + + ath12k_mac_update_vif_offload(arvif); + } + + return; + } + + ath12k_mac_update_vif_offload(&ahvif->deflink); } -static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif) +int ath12k_mac_vdev_create(struct ath12k *ar, struct ath12k_link_vif *arvif) { struct ath12k_hw *ah = ar->ah; struct ath12k_base *ab = ar->ab; struct ieee80211_hw *hw = ah->hw; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; struct ath12k_wmi_peer_create_arg peer_param; + struct ieee80211_bss_conf *link_conf; u32 param_id, param_value; u16 nss; int i; int ret, vdev_id; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); + + link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[arvif->link_id]); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM 
link %u\n", + vif->addr, arvif->link_id); + return -ENOLINK; + } + + memcpy(arvif->bssid, link_conf->addr, ETH_ALEN); arvif->ar = ar; vdev_id = __ffs64(ab->free_vdev_map); arvif->vdev_id = vdev_id; - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; switch (vif->type) { case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_STATION: - arvif->vdev_type = WMI_VDEV_TYPE_STA; + ahvif->vdev_type = WMI_VDEV_TYPE_STA; if (vif->p2p) - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; break; case NL80211_IFTYPE_MESH_POINT: - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; fallthrough; case NL80211_IFTYPE_AP: - arvif->vdev_type = WMI_VDEV_TYPE_AP; + ahvif->vdev_type = WMI_VDEV_TYPE_AP; if (vif->p2p) - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; break; case NL80211_IFTYPE_MONITOR: - arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; + ahvif->vdev_type = WMI_VDEV_TYPE_MONITOR; ar->monitor_vdev_id = vdev_id; break; case NL80211_IFTYPE_P2P_DEVICE: - arvif->vdev_type = WMI_VDEV_TYPE_STA; - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; + ahvif->vdev_type = WMI_VDEV_TYPE_STA; + ahvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; break; default: WARN_ON(1); @@ -6471,7 +6895,7 @@ static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif) } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n", - arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, + arvif->vdev_id, ahvif->vdev_type, ahvif->vdev_subtype, ab->free_vdev_map); vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1); @@ -6485,11 +6909,11 @@ static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif) goto err; } - ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg); + ret = ath12k_wmi_vdev_create(ar, arvif->bssid, &vdev_arg); if (ret) { ath12k_warn(ab, "failed to create WMI vdev %d: %d\n", arvif->vdev_id, ret); - goto err; + return ret; } ar->num_created_vdevs++; @@ -6514,10 +6938,10 @@ static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif) goto err_vdev_del; } - switch (arvif->vdev_type) { + switch (ahvif->vdev_type) { case WMI_VDEV_TYPE_AP: peer_param.vdev_id = arvif->vdev_id; - peer_param.peer_addr = vif->addr; + peer_param.peer_addr = arvif->bssid; peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; ret = ath12k_peer_create(ar, arvif, NULL, &peer_param); if (ret) { @@ -6590,29 +7014,28 @@ static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif) } ath12k_dp_vdev_tx_attach(ar, arvif); - if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled) ath12k_mac_monitor_vdev_create(ar); - arvif->ar = ar; return ret; err_peer_del: - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { + if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { reinit_completion(&ar->peer_delete_done); - ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr, + ret = ath12k_wmi_send_peer_delete_cmd(ar, arvif->bssid, arvif->vdev_id); if (ret) { ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", - arvif->vdev_id, vif->addr); + arvif->vdev_id, arvif->bssid); goto err; } ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, - vif->addr); + arvif->bssid); if (ret) - goto err; + /* KVALO: why not goto err? 
*/ + return ret; ar->num_peers--; } @@ -6634,21 +7057,56 @@ err: return ret; } -static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ieee80211_vif *vif) +static void ath12k_mac_vif_flush_key_cache(struct ath12k_link_vif *arvif) +{ + struct ath12k_key_conf *key_conf, *tmp; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k_hw *ah = ahvif->ah; + struct ath12k_sta *ahsta; + struct ath12k_link_sta *arsta; + struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id]; + int ret; + + lockdep_assert_wiphy(ah->hw->wiphy); + + list_for_each_entry_safe(key_conf, tmp, &cache->key_conf.list, list) { + arsta = NULL; + if (key_conf->sta) { + ahsta = ath12k_sta_to_ahsta(key_conf->sta); + arsta = wiphy_dereference(ah->hw->wiphy, + ahsta->link[arvif->link_id]); + if (!arsta) + goto free_cache; + } + + ret = ath12k_mac_set_key(arvif->ar, key_conf->cmd, + arvif, arsta, + key_conf->key); + if (ret) + ath12k_warn(arvif->ar->ab, "unable to apply set key param to vdev %d ret %d\n", + arvif->vdev_id, ret); +free_cache: + list_del(&key_conf->list); + kfree(key_conf); + } +} + +static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif *arvif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_vif_cache *cache = arvif->cache; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); + struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id]; struct ath12k_base *ab = ar->ab; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (!cache) return; if (cache->tx_conf.changed) { - ret = ath12k_mac_conf_tx(arvif, 0, cache->tx_conf.ac, + ret = ath12k_mac_conf_tx(arvif, cache->tx_conf.ac, &cache->tx_conf.tx_queue_params); if (ret) ath12k_warn(ab, @@ -6661,26 +7119,25 @@ static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ieee80211_vif cache->bss_conf_changed); } - if (cache->key_conf.changed) { - ret = ath12k_mac_set_key(ar, cache->key_conf.cmd, vif, NULL, - cache->key_conf.key); - if (ret) - ath12k_warn(ab, "unable to apply set key param to vdev %d ret %d\n", - arvif->vdev_id, ret); - } - ath12k_arvif_put_cache(arvif); + if (!list_empty(&cache->key_conf.list)) + ath12k_mac_vif_flush_key_cache(arvif); + + ath12k_ahvif_put_link_cache(ahvif, arvif->link_id); } static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, + struct ath12k_link_vif *arvif, struct ieee80211_chanctx_conf *ctx) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); struct ath12k_hw *ah = hw->priv; - struct ath12k *ar, *prev_ar; + struct ath12k *ar; struct ath12k_base *ab; + u8 link_id = arvif->link_id; int ret; + lockdep_assert_wiphy(hw->wiphy); + if (ah->num_radio == 1) ar = ah->radio; else if (ctx) @@ -6709,27 +7166,20 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw, if (WARN_ON(arvif->is_started)) return NULL; - /* backup the previously used ar ptr since arvif->ar would - * be set to NULL after vdev delete is done - */ - prev_ar = arvif->ar; - mutex_lock(&prev_ar->conf_mutex); - ret = ath12k_mac_vdev_delete(prev_ar, vif); - - if (ret) - ath12k_warn(prev_ar->ab, "unable to delete vdev %d\n", - ret); - mutex_unlock(&prev_ar->conf_mutex); + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); } } ab = ar->ab; - mutex_lock(&ar->conf_mutex); - if (arvif->is_created) goto flush; + /* Assign arvif again here 
since previous radio switch block + * would've unassigned and cleared it. + */ + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); if (vif->type == NL80211_IFTYPE_AP && ar->num_peers > (ar->max_num_peers - 1)) { ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); @@ -6742,7 +7192,7 @@ static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw, goto unlock; } - ret = ath12k_mac_vdev_create(ar, vif); + ret = ath12k_mac_vdev_create(ar, arvif); if (ret) { ath12k_warn(ab, "failed to create vdev %pM ret %d", vif->addr, ret); goto unlock; @@ -6753,21 +7203,27 @@ flush: * add_interface(), Apply any parameters for the vdev which were received * after add_interface, corresponding to this vif. */ - ath12k_mac_vif_cache_flush(ar, vif); + ath12k_mac_vif_cache_flush(ar, arvif); unlock: - mutex_unlock(&ar->conf_mutex); return arvif->ar; } static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; int i; - memset(arvif, 0, sizeof(*arvif)); + lockdep_assert_wiphy(hw->wiphy); - arvif->vif = vif; + memset(ahvif, 0, sizeof(*ahvif)); + + ahvif->ah = ah; + ahvif->vif = vif; + arvif = &ahvif->deflink; + arvif->ahvif = ahvif; INIT_LIST_HEAD(&arvif->list); INIT_DELAYED_WORK(&arvif->connection_loss_work, @@ -6787,13 +7243,14 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; - - /* For single radio wiphy(i.e ah->num_radio is 1), create the vdev - * during add_interface itself, for multi radio wiphy, defer the vdev - * creation until channel_assign to determine the radio on which the - * vdev needs to be created + /* For non-ml vifs, vif->addr is the actual vdev address but for + * ML vif link(link BSSID) address is the vdev address and it can be a + * different one from vif->addr (i.e ML address). + * Defer vdev creation until assign_chanctx or hw_scan is initiated as driver + * will not know if this interface is an ML vif at this point. 
*/ - ath12k_mac_assign_vif_to_vdev(hw, vif, NULL); + ath12k_mac_assign_vif_to_vdev(hw, arvif, NULL); + return 0; } @@ -6822,14 +7279,16 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif } } -static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif) +static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ath12k_link_vif *arvif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ath12k_base *ab = ar->ab; unsigned long time_left; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + reinit_completion(&ar->vdev_delete_done); ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id); @@ -6850,7 +7309,7 @@ static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif) ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); ar->num_created_vdevs--; - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ar->monitor_vdev_id = -1; ar->monitor_vdev_created = false; } else if (ar->monitor_vdev_created && !ar->monitor_started) { @@ -6866,7 +7325,7 @@ err_vdev_del: spin_unlock_bh(&ar->data_lock); ath12k_peer_cleanup(ar, arvif->vdev_id); - ath12k_arvif_put_cache(arvif); + ath12k_ahvif_put_link_cache(ahvif, arvif->link_id); idr_for_each(&ar->txmgmt_idr, ath12k_mac_vif_txmgmt_idr_remove, vif); @@ -6887,39 +7346,24 @@ err_vdev_del: static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_base *ab; - struct ath12k *ar; - int ret; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + u8 link_id; - if (!arvif->is_created) { + lockdep_assert_wiphy(hw->wiphy); + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { /* if we cached some config but never received assign chanctx, * free the allocated cache. */ - ath12k_arvif_put_cache(arvif); - return; - } - - ar = arvif->ar; - ab = ar->ab; - - cancel_delayed_work_sync(&arvif->connection_loss_work); - - mutex_lock(&ar->conf_mutex); - - ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n", - arvif->vdev_id); + ath12k_ahvif_put_link_cache(ahvif, link_id); + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (!arvif || !arvif->is_created) + continue; - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr); - if (ret) - ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n", - arvif->vdev_id, ret); + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); } - - ath12k_mac_vdev_delete(ar, vif); - - mutex_unlock(&ar->conf_mutex); } /* FIXME: Has to be verified. 
*/ @@ -6938,7 +7382,7 @@ static void ath12k_mac_configure_filter(struct ath12k *ar, bool reset_flag; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ar->filter_flags = total_flags; @@ -6963,14 +7407,12 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; - ar = ath12k_ah_to_ar(ah, 0); + lockdep_assert_wiphy(hw->wiphy); - mutex_lock(&ar->conf_mutex); + ar = ath12k_ah_to_ar(ah, 0); *total_flags &= SUPPORTED_FILTERS; ath12k_mac_configure_filter(ar, *total_flags); - - mutex_unlock(&ar->conf_mutex); } static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) @@ -6980,11 +7422,11 @@ static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 * struct ath12k *ar; int i; + lockdep_assert_wiphy(hw->wiphy); + for_each_ar(ah, ar, i) { - mutex_lock(&ar->conf_mutex); antennas_rx = max_t(u32, antennas_rx, ar->cfg_rx_chainmask); antennas_tx = max_t(u32, antennas_tx, ar->cfg_tx_chainmask); - mutex_unlock(&ar->conf_mutex); } *tx_ant = antennas_tx; @@ -7000,10 +7442,10 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx int ret = 0; int i; + lockdep_assert_wiphy(hw->wiphy); + for_each_ar(ah, ar, i) { - mutex_lock(&ar->conf_mutex); ret = __ath12k_set_antenna(ar, tx_ant, rx_ant); - mutex_unlock(&ar->conf_mutex); if (ret) break; } @@ -7011,13 +7453,13 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx return ret; } -static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif, +static int ath12k_mac_ampdu_action(struct ath12k_link_vif *arvif, struct ieee80211_ampdu_params *params) { struct ath12k *ar = arvif->ar; int ret = -EINVAL; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); switch (params->action) { case IEEE80211_AMPDU_RX_START: @@ -7047,19 +7489,20 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; int ret = -EINVAL; + lockdep_assert_wiphy(hw->wiphy); + ar = ath12k_get_ar_by_vif(hw, vif); if (!ar) return -EINVAL; ar = ath12k_ah_to_ar(ah, 0); + arvif = &ahvif->deflink; - mutex_lock(&ar->conf_mutex); ret = ath12k_mac_ampdu_action(arvif, params); - mutex_unlock(&ar->conf_mutex); - if (ret) ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n", ar->pdev_idx, params->action, ret); @@ -7073,6 +7516,8 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, struct ath12k *ar; struct ath12k_base *ab; + lockdep_assert_wiphy(hw->wiphy); + ar = ath12k_get_ar_by_ctx(hw, ctx); if (!ar) return -EINVAL; @@ -7083,8 +7528,6 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, "mac chanctx add freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); - mutex_lock(&ar->conf_mutex); - spin_lock_bh(&ar->data_lock); /* TODO: In case of multiple channel context, populate rx_channel from * Rx PPDU desc information. 
@@ -7092,8 +7535,6 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, ar->rx_channel = ctx->def.chan; spin_unlock_bh(&ar->data_lock); - mutex_unlock(&ar->conf_mutex); - return 0; } @@ -7103,6 +7544,8 @@ static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw, struct ath12k *ar; struct ath12k_base *ab; + lockdep_assert_wiphy(hw->wiphy); + ar = ath12k_get_ar_by_ctx(hw, ctx); if (!ar) return; @@ -7113,16 +7556,12 @@ static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw, "mac chanctx remove freq %u width %d ptr %p\n", ctx->def.chan->center_freq, ctx->def.width, ctx); - mutex_lock(&ar->conf_mutex); - spin_lock_bh(&ar->data_lock); /* TODO: In case of there is one more channel context left, populate * rx_channel with the channel of that remaining channel context. */ ar->rx_channel = NULL; spin_unlock_bh(&ar->data_lock); - - mutex_unlock(&ar->conf_mutex); } static enum wmi_phy_mode @@ -7189,7 +7628,7 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar, } static int -ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, +ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif, struct ieee80211_chanctx_conf *ctx, bool restart) { @@ -7197,10 +7636,11 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, struct ath12k_base *ab = ar->ab; struct wmi_vdev_start_req_arg arg = {}; const struct cfg80211_chan_def *chandef = &ctx->def; - int he_support = arvif->vif->bss_conf.he_support; + struct ath12k_vif *ahvif = arvif->ahvif; + int he_support = ahvif->vif->bss_conf.he_support; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); reinit_completion(&ar->vdev_setup_done); @@ -7216,7 +7656,7 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, arg.mode = ath12k_mac_check_down_grade_phy_mode(ar, arg.mode, chandef->chan->band, - arvif->vif->type); + ahvif->vif->type); arg.min_power = 0; arg.max_power = chandef->chan->max_power * 2; arg.max_reg_power = chandef->chan->max_reg_power * 2; @@ -7236,10 +7676,10 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, return ret; } - if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { - arg.ssid = arvif->u.ap.ssid; - arg.ssid_len = arvif->u.ap.ssid_len; - arg.hidden_ssid = arvif->u.ap.hidden_ssid; + if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { + arg.ssid = ahvif->u.ap.ssid; + arg.ssid_len = ahvif->u.ap.ssid_len; + arg.hidden_ssid = ahvif->u.ap.hidden_ssid; /* For now allow DFS for AP mode */ arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); @@ -7286,7 +7726,7 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, ar->num_started_vdevs++; ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n", - arvif->vif->addr, arvif->vdev_id); + ahvif->vif->addr, arvif->vdev_id); /* Enable CAC Flag in the driver by checking the channel DFS cac time, * i.e dfs_cac_ms value which will be valid only for radar channels @@ -7295,7 +7735,7 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, * during CAC. 
*/ /* TODO: Set the flag for other interface types as required */ - if (arvif->vdev_type == WMI_VDEV_TYPE_AP && + if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP && chandef->chan->dfs_cac_ms && chandef->chan->dfs_state == NL80211_DFS_USABLE) { set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); @@ -7312,13 +7752,13 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, return 0; } -static int ath12k_mac_vdev_start(struct ath12k_vif *arvif, +static int ath12k_mac_vdev_start(struct ath12k_link_vif *arvif, struct ieee80211_chanctx_conf *ctx) { return ath12k_mac_vdev_start_restart(arvif, ctx, false); } -static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif, +static int ath12k_mac_vdev_restart(struct ath12k_link_vif *arvif, struct ieee80211_chanctx_conf *ctx) { return ath12k_mac_vdev_start_restart(arvif, ctx, true); @@ -7336,8 +7776,13 @@ static void ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); struct ath12k_mac_change_chanctx_arg *arg = data; + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(ahvif->ah->hw->wiphy); + + arvif = &ahvif->deflink; if (arvif->ar != arg->ar) return; @@ -7352,9 +7797,14 @@ static void ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); struct ath12k_mac_change_chanctx_arg *arg = data; struct ieee80211_chanctx_conf *ctx; + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(ahvif->ah->hw->wiphy); + + arvif = &ahvif->deflink; if (arvif->ar != arg->ar) return; @@ -7398,13 +7848,13 @@ static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width) } static int ath12k_mac_update_peer_puncturing_width(struct ath12k *ar, - struct ath12k_vif *arvif, + struct ath12k_link_vif *arvif, struct cfg80211_chan_def def) { u32 param_id, param_value; int ret; - if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; param_id = WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP; @@ -7430,17 +7880,19 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, { struct ath12k_wmi_vdev_up_params params = {}; struct ath12k_base *ab = ar->ab; + struct ath12k_link_vif *arvif; struct ieee80211_vif *vif; - struct ath12k_vif *arvif; + struct ath12k_vif *ahvif; int ret; int i; bool monitor_vif = false; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); for (i = 0; i < n_vifs; i++) { vif = vifs[i].vif; - arvif = ath12k_vif_to_arvif(vif); + ahvif = ath12k_vif_to_ahvif(vif); + arvif = &ahvif->deflink; if (vif->type == NL80211_IFTYPE_MONITOR) monitor_vif = true; @@ -7490,10 +7942,14 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, memset(¶ms, 0, sizeof(params)); params.vdev_id = arvif->vdev_id; - params.aid = arvif->aid; + params.aid = ahvif->aid; params.bssid = arvif->bssid; if (vif->mbssid_tx_vif) { - params.tx_bssid = ath12k_vif_to_arvif(vif->mbssid_tx_vif)->bssid; + struct ath12k_vif *ahvif = + ath12k_vif_to_ahvif(vif->mbssid_tx_vif); + struct ath12k_link_vif *arvif = &ahvif->deflink; + + params.tx_bssid = arvif->bssid; params.nontx_profile_idx = vif->bss_conf.bssid_index; params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator; } @@ -7529,7 +7985,7 @@ ath12k_mac_update_active_vif_chan(struct ath12k *ar, struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx, .ar = ar }; struct ieee80211_hw *hw = 
ath12k_ar_to_hw(ar); - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ieee80211_iterate_active_interfaces_atomic(hw, IEEE80211_IFACE_ITER_NORMAL, @@ -7559,14 +8015,14 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, struct ath12k *ar; struct ath12k_base *ab; + lockdep_assert_wiphy(hw->wiphy); + ar = ath12k_get_ar_by_ctx(hw, ctx); if (!ar) return; ab = ar->ab; - mutex_lock(&ar->conf_mutex); - ath12k_dbg(ab, ATH12K_DBG_MAC, "mac chanctx change freq %u width %d ptr %p changed %x\n", ctx->def.chan->center_freq, ctx->def.width, ctx, changed); @@ -7575,7 +8031,7 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, * switch_vif_chanctx(). */ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) - goto unlock; + return; if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH || changed & IEEE80211_CHANCTX_CHANGE_RADAR || @@ -7583,16 +8039,14 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, ath12k_mac_update_active_vif_chan(ar, ctx); /* TODO: Recalc radar detection */ - -unlock: - mutex_unlock(&ar->conf_mutex); } static int ath12k_start_vdev_delay(struct ath12k *ar, - struct ath12k_vif *arvif) + struct ath12k_link_vif *arvif) { struct ath12k_base *ab = ar->ab; - struct ieee80211_vif *vif = arvif->vif; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); int ret; if (WARN_ON(arvif->is_started)) @@ -7606,7 +8060,7 @@ static int ath12k_start_vdev_delay(struct ath12k *ar, return ret; } - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id); if (ret) { ath12k_warn(ab, "failed put monitor up: %d\n", ret); @@ -7626,24 +8080,37 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_base *ab; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + u8 link_id = link_conf->link_id; + struct ath12k_link_vif *arvif; int ret; + lockdep_assert_wiphy(hw->wiphy); + /* For multi radio wiphy, the vdev was not created during add_interface * create now since we have a channel ctx now to assign to a specific ar/fw */ - ar = ath12k_mac_assign_vif_to_vdev(hw, vif, ctx); - if (!ar) { + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + if (!arvif) { WARN_ON(1); + return -ENOMEM; + } + + if (!arvif->is_started) { + ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx); + if (!ar) + return -EINVAL; + } else { + ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u link vif is already started", + vif->addr, link_id); return -EINVAL; } ab = ar->ab; - mutex_lock(&ar->conf_mutex); - ath12k_dbg(ab, ATH12K_DBG_MAC, "mac chanctx assign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); @@ -7652,8 +8119,8 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, /* for some targets bss peer must be created before vdev_start */ if (ab->hw_params->vdev_start_delay && - arvif->vdev_type != WMI_VDEV_TYPE_AP && - arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && + ahvif->vdev_type != WMI_VDEV_TYPE_AP && + ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && !ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) { memcpy(&arvif->chanctx, ctx, sizeof(*ctx)); ret = 0; @@ -7665,10 +8132,11 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, goto out; } - if (arvif->vdev_type == 
WMI_VDEV_TYPE_MONITOR) { + if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath12k_mac_monitor_start(ar); if (ret) goto out; + arvif->is_started = true; goto out; } @@ -7681,7 +8149,7 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, goto out; } - if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created) + if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created) ath12k_mac_monitor_start(ar); arvif->is_started = true; @@ -7689,8 +8157,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, /* TODO: Setup ps and cts/rts protection */ out: - mutex_unlock(&ar->conf_mutex); - return ret; } @@ -7702,9 +8168,15 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, { struct ath12k *ar; struct ath12k_base *ab; - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + u8 link_id = link_conf->link_id; int ret; + lockdep_assert_wiphy(hw->wiphy); + + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + /* The vif is expected to be attached to an ar's VDEV. * We leave the vif/vdev in this function as is * and not delete the vdev symmetric to assign_vif_chanctx() @@ -7712,32 +8184,28 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, * remove_interface() or when there is a change in channel * that moves the vif to a new ar */ - if (!arvif->is_created) + if (!arvif || !arvif->is_created) return; ar = arvif->ar; ab = ar->ab; - mutex_lock(&ar->conf_mutex); - ath12k_dbg(ab, ATH12K_DBG_MAC, "mac chanctx unassign ptr %p vdev_id %i\n", ctx, arvif->vdev_id); WARN_ON(!arvif->is_started); - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { + if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath12k_mac_monitor_stop(ar); - if (ret) { - mutex_unlock(&ar->conf_mutex); + if (ret) return; - } arvif->is_started = false; } - if (arvif->vdev_type != WMI_VDEV_TYPE_STA && - arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { + if (ahvif->vdev_type != WMI_VDEV_TYPE_STA && + ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { ath12k_bss_disassoc(ar, arvif); ret = ath12k_mac_vdev_stop(arvif); if (ret) @@ -7746,11 +8214,12 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, } arvif->is_started = false; - if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && + if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->num_started_vdevs == 1 && ar->monitor_vdev_created) ath12k_mac_monitor_stop(ar); - mutex_unlock(&ar->conf_mutex); + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); } static int @@ -7761,35 +8230,32 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, { struct ath12k *ar; + lockdep_assert_wiphy(hw->wiphy); + ar = ath12k_get_ar_by_ctx(hw, vifs->old_ctx); if (!ar) return -EINVAL; - mutex_lock(&ar->conf_mutex); - /* Switching channels across radio is not allowed */ - if (ar != ath12k_get_ar_by_ctx(hw, vifs->new_ctx)) { - mutex_unlock(&ar->conf_mutex); + if (ar != ath12k_get_ar_by_ctx(hw, vifs->new_ctx)) return -EINVAL; - } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac chanctx switch n_vifs %d mode %d\n", n_vifs, mode); ath12k_mac_update_vif_chan(ar, vifs, n_vifs); - mutex_unlock(&ar->conf_mutex); - return 0; } static int ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret = 0; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + list_for_each_entry(arvif, &ar->arvifs, list) { ath12k_dbg(ar->ab, 
ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n", param, arvif->vdev_id, value); @@ -7802,7 +8268,7 @@ ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value) break; } } - mutex_unlock(&ar->conf_mutex); + return ret; } @@ -7815,6 +8281,8 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) struct ath12k *ar; int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i; + lockdep_assert_wiphy(hw->wiphy); + /* Currently we set the rts threshold value to all the vifs across * all radios of the single wiphy. * TODO Once support for vif specific RTS threshold in mac80211 is @@ -7844,6 +8312,9 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) * supported. This effectively prevents mac80211 from doing frame * fragmentation in software. */ + + lockdep_assert_wiphy(hw->wiphy); + return -EOPNOTSUPP; } @@ -7888,6 +8359,8 @@ static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *v struct ath12k *ar; int i; + lockdep_assert_wiphy(hw->wiphy); + if (drop) return; @@ -8021,14 +8494,14 @@ ath12k_mac_get_single_legacy_rate(struct ath12k *ar, return 0; } -static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif, +static int ath12k_mac_set_fixed_rate_params(struct ath12k_link_vif *arvif, u32 rate, u8 nss, u8 sgi, u8 ldpc) { struct ath12k *ar = arvif->ar; u32 vdev_param; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n", arvif->vdev_id, rate, nss, sgi); @@ -8100,8 +8573,9 @@ ath12k_mac_vht_mcs_range_present(struct ath12k *ar, static void ath12k_mac_set_bitrate_mask_iter(void *data, struct ieee80211_sta *sta) { - struct ath12k_vif *arvif = data; - struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); + struct ath12k_link_vif *arvif = data; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_link_sta *arsta = &ahsta->deflink; struct ath12k *ar = arvif->ar; if (arsta->arvif != arvif) @@ -8111,14 +8585,15 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data, arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; spin_unlock_bh(&ar->data_lock); - ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk); + wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &arsta->update_wk); } static void ath12k_mac_disable_peer_fixed_rate(void *data, struct ieee80211_sta *sta) { - struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); - struct ath12k_vif *arvif = data; + struct ath12k_link_vif *arvif = data; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_link_sta *arsta = &ahsta->deflink; struct ath12k *ar = arvif->ar; int ret; @@ -8140,9 +8615,10 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct cfg80211_bitrate_mask *mask) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; struct cfg80211_chan_def def; - struct ath12k *ar = arvif->ar; + struct ath12k *ar; enum nl80211_band band; const u8 *ht_mcs_mask; const u16 *vht_mcs_mask; @@ -8154,8 +8630,15 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, int ret; int num_rates; - if (ath12k_mac_vif_chan(vif, &def)) - return -EPERM; + lockdep_assert_wiphy(hw->wiphy); + + arvif = &ahvif->deflink; + + ar = arvif->ar; + if (ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) { + ret = -EPERM; + goto out; + } band = def.chan->band; 
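A theme of all these hunks is the locking model: ar->conf_mutex is removed and every mac80211 op now relies on the wiphy mutex that mac80211 already holds, checked with lockdep_assert_wiphy(); this is also why station iteration switches from the _atomic iterators to ieee80211_iterate_stations_mtx() and why works are queued with wiphy_work_queue(). A toy userspace rendering of "callers hold one outer lock, helpers only assert it", using pthreads in place of the kernel primitives, looks like:

/* Toy model of the locking change running through these hunks: the
 * per-radio conf_mutex is gone and every mac80211 callback instead runs
 * with the single wiphy mutex held, asserted via lockdep_assert_wiphy().
 * pthreads stand in for the kernel mutex/lockdep machinery here.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wiphy_mtx = PTHREAD_MUTEX_INITIALIZER;
static int wiphy_locked;          /* poor man's lockdep state */

static void assert_wiphy_held(void)
{
        assert(wiphy_locked);     /* ~ lockdep_assert_wiphy(hw->wiphy) */
}

/* helper that used to take conf_mutex itself; now it only asserts */
static void set_vdev_param_all_vifs(int param, int value)
{
        assert_wiphy_held();
        printf("param %d = %d on every vif\n", param, value);
}

/* mac80211-style op: the caller (mac80211) already holds the wiphy lock */
static void op_set_rts_threshold(int value)
{
        assert_wiphy_held();
        set_vdev_param_all_vifs(1 /* RTS */, value);
}

int main(void)
{
        pthread_mutex_lock(&wiphy_mtx);
        wiphy_locked = 1;
        op_set_rts_threshold(2346);
        wiphy_locked = 0;
        pthread_mutex_unlock(&wiphy_mtx);
        return 0;
}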
ht_mcs_mask = mask->control[band].ht_mcs; @@ -8184,9 +8667,9 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, arvif->vdev_id, ret); goto out; } - ieee80211_iterate_stations_atomic(hw, - ath12k_mac_disable_peer_fixed_rate, - arvif); + ieee80211_iterate_stations_mtx(hw, + ath12k_mac_disable_peer_fixed_rate, + arvif); } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask, &single_nss)) { rate = WMI_FIXED_RATE_NONE; @@ -8227,34 +8710,25 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, */ ath12k_warn(ar->ab, "Setting more than one MCS Value in bitrate mask not supported\n"); - ret = -EINVAL; - goto out; + return -EINVAL; } - ieee80211_iterate_stations_atomic(hw, - ath12k_mac_disable_peer_fixed_rate, - arvif); - - mutex_lock(&ar->conf_mutex); + ieee80211_iterate_stations_mtx(hw, + ath12k_mac_disable_peer_fixed_rate, + arvif); arvif->bitrate_mask = *mask; - ieee80211_iterate_stations_atomic(hw, - ath12k_mac_set_bitrate_mask_iter, - arvif); - - mutex_unlock(&ar->conf_mutex); + ieee80211_iterate_stations_mtx(hw, + ath12k_mac_set_bitrate_mask_iter, + arvif); } - mutex_lock(&ar->conf_mutex); - ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); if (ret) { ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n", arvif->vdev_id, ret); } - mutex_unlock(&ar->conf_mutex); - out: return ret; } @@ -8266,9 +8740,12 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; struct ath12k_base *ab; - struct ath12k_vif *arvif; + struct ath12k_vif *ahvif; + struct ath12k_link_vif *arvif; int recovery_count, i; + lockdep_assert_wiphy(hw->wiphy); + if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) return; @@ -8281,8 +8758,6 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, ieee80211_wake_queues(hw); for_each_ar(ah, ar, i) { - mutex_lock(&ar->conf_mutex); - ab = ar->ab; ath12k_warn(ar->ab, "pdev %d successfully recovered\n", @@ -8307,11 +8782,12 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, } list_for_each_entry(arvif, &ar->arvifs, list) { + ahvif = arvif->ahvif; ath12k_dbg(ab, ATH12K_DBG_BOOT, "reconfig cipher %d up %d vdev type %d\n", - arvif->key_cipher, + ahvif->key_cipher, arvif->is_up, - arvif->vdev_type); + ahvif->vdev_type); /* After trigger disconnect, then upper layer will * trigger connect again, then the PN number of @@ -8319,16 +8795,14 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, * side, hence PN number mismatch will not happen. 
*/ if (arvif->is_up && - arvif->vdev_type == WMI_VDEV_TYPE_STA && - arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) { - ieee80211_hw_restart_disconnect(arvif->vif); + ahvif->vdev_type == WMI_VDEV_TYPE_STA && + ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) { + ieee80211_hw_restart_disconnect(ahvif->vif); ath12k_dbg(ab, ATH12K_DBG_BOOT, "restart disconnect\n"); } } - - mutex_unlock(&ar->conf_mutex); } } @@ -8339,7 +8813,7 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar, int ret; enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) || ar->rx_channel != channel) @@ -8371,6 +8845,8 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, struct ieee80211_supported_band *sband; struct survey_info *ar_survey; + lockdep_assert_wiphy(hw->wiphy); + if (idx >= ATH12K_NUM_CHANS) return -ENOENT; @@ -8404,8 +8880,6 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, ar_survey = &ar->survey[idx]; - mutex_lock(&ar->conf_mutex); - ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); spin_lock_bh(&ar->data_lock); @@ -8417,7 +8891,6 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, if (ar->rx_channel == survey->channel) survey->filled |= SURVEY_INFO_IN_USE; - mutex_unlock(&ar->conf_mutex); return 0; } @@ -8426,7 +8899,12 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct station_info *sinfo) { - struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta); + struct ath12k_link_sta *arsta; + + lockdep_assert_wiphy(hw->wiphy); + + arsta = &ahsta->deflink; sinfo->rx_duration = arsta->rx_duration; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); @@ -8463,7 +8941,7 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, ar = ath12k_ah_to_ar(ah, 0); - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); spin_lock_bh(&ar->data_lock); ar->scan.roc_notify = false; @@ -8471,8 +8949,6 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, ath12k_scan_abort(ar); - mutex_unlock(&ar->conf_mutex); - cancel_delayed_work_sync(&ar->scan.timeout); return 0; @@ -8484,24 +8960,31 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw, int duration, enum ieee80211_roc_type type) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k_wmi_scan_req_arg arg; - struct ath12k *ar, *prev_ar; + struct ath12k_link_vif *arvif; + struct ath12k *ar; u32 scan_time_msec; bool create = true; + u8 link_id; int ret; - if (ah->num_radio == 1) { - WARN_ON(!arvif->is_created); - ar = ath12k_ah_to_ar(ah, 0); - goto scan; - } + lockdep_assert_wiphy(hw->wiphy); ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq); - if (!ar) - return -EINVAL; + if (!ar) { + ret = -EINVAL; + goto exit; + } + /* check if any of the links of ML VIF is already started on + * radio(ar) correpsondig to given scan frequency and use it, + * if not use deflink(link 0) for scan purpose. 
+ */ + + link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar); + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); /* If the vif is already assigned to a specific vdev of an ar, * check whether its already started, vdev which is started * are not allowed to switch to a new radio. @@ -8512,44 +8995,35 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw, * always on the same band for the vif */ if (arvif->is_created) { - if (WARN_ON(!arvif->ar)) - return -EINVAL; + if (WARN_ON(!arvif->ar)) { + ret = -EINVAL; + goto exit; + } - if (ar != arvif->ar && arvif->is_started) - return -EBUSY; + if (ar != arvif->ar && arvif->is_started) { + ret = -EBUSY; + goto exit; + } if (ar != arvif->ar) { - /* backup the previously used ar ptr, since the vdev delete - * would assign the arvif->ar to NULL after the call - */ - prev_ar = arvif->ar; - mutex_lock(&prev_ar->conf_mutex); - ret = ath12k_mac_vdev_delete(prev_ar, vif); - mutex_unlock(&prev_ar->conf_mutex); - if (ret) { - ath12k_warn(prev_ar->ab, - "unable to delete scan vdev for roc: %d\n", - ret); - return ret; - } + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); } else { create = false; } } if (create) { - mutex_lock(&ar->conf_mutex); - ret = ath12k_mac_vdev_create(ar, vif); - mutex_unlock(&ar->conf_mutex); + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + + ret = ath12k_mac_vdev_create(ar, arvif); if (ret) { ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n", ret); - return -EINVAL; + goto exit; } } -scan: - mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); switch (ar->scan.state) { @@ -8624,9 +9098,8 @@ scan: free_chan_list: kfree(arg.chan_list); -exit: - mutex_unlock(&ar->conf_mutex); +exit: return ret; } @@ -8634,16 +9107,20 @@ static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); - struct ath12k_rekey_data *rekey_data = &arvif->rekey_data; + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_rekey_data *rekey_data; struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(hw->wiphy); + + arvif = &ahvif->deflink; + rekey_data = &arvif->rekey_data; ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set rekey data vdev %d\n", arvif->vdev_id); - mutex_lock(&ar->conf_mutex); - memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN); memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN); @@ -8660,8 +9137,6 @@ static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw, rekey_data->kck, NL80211_KEK_LEN); ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "replay ctr", NULL, &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr)); - - mutex_unlock(&ar->conf_mutex); } static const struct ieee80211_ops ath12k_ops = { @@ -8674,7 +9149,9 @@ static const struct ieee80211_ops ath12k_ops = { .remove_interface = ath12k_mac_op_remove_interface, .update_vif_offload = ath12k_mac_op_update_vif_offload, .config = ath12k_mac_op_config, - .bss_info_changed = ath12k_mac_op_bss_info_changed, + .link_info_changed = ath12k_mac_op_link_info_changed, + .vif_cfg_changed = ath12k_mac_op_vif_cfg_changed, + .change_vif_links = ath12k_mac_op_change_vif_links, .configure_filter = ath12k_mac_op_configure_filter, .hw_scan = ath12k_mac_op_hw_scan, .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan, @@ -9321,7 +9798,7 @@ static void ath12k_mac_setup(struct ath12k *ar) 
spin_lock_init(&ar->data_lock); INIT_LIST_HEAD(&ar->arvifs); INIT_LIST_HEAD(&ar->ppdu_stats_info); - mutex_init(&ar->conf_mutex); + init_completion(&ar->vdev_setup_done); init_completion(&ar->vdev_delete_done); init_completion(&ar->peer_assoc_done); @@ -9502,7 +9979,7 @@ err: return ret; } -int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif, +int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif, enum wmi_sta_keepalive_method method, u32 interval) { @@ -9510,9 +9987,9 @@ int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif, struct ath12k *ar = arvif->ar; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA) return 0; if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map)) diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index 5efbb6822628..d382337ba649 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -41,6 +41,9 @@ struct ath12k_generic_iter { #define ATH12K_TX_POWER_MAX_VAL 70 #define ATH12K_TX_POWER_MIN_VAL 0 +#define ATH12K_DEFAULT_LINK_ID 0 +#define ATH12K_INVALID_LINK_ID 255 + enum ath12k_supported_bw { ATH12K_BW_20 = 0, ATH12K_BW_40 = 1, @@ -65,9 +68,9 @@ u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, void __ath12k_mac_scan_finish(struct ath12k *ar); void ath12k_mac_scan_finish(struct ath12k *ar); -struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id); -struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, - u32 vdev_id); +struct ath12k_link_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id); +struct ath12k_link_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, + u32 vdev_id); struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id); struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id); @@ -82,7 +85,7 @@ int ath12k_mac_rfkill_config(struct ath12k *ar); int ath12k_mac_wait_tx_complete(struct ath12k *ar); void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb); void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id); -int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif, +int ath12k_mac_vif_set_keepalive(struct ath12k_link_vif *arvif, enum wmi_sta_keepalive_method method, u32 interval); u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar); diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c index df96b0f91f54..2f6d14382ed7 100644 --- a/drivers/net/wireless/ath/ath12k/mhi.c +++ b/drivers/net/wireless/ath/ath12k/mhi.c @@ -649,3 +649,8 @@ void ath12k_mhi_resume(struct ath12k_pci *ab_pci) { ath12k_mhi_set_state(ab_pci, ATH12K_MHI_RESUME); } + +void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic) +{ + mhi_download_rddm_image(mhi_ctrl, in_panic); +} diff --git a/drivers/net/wireless/ath/ath12k/mhi.h b/drivers/net/wireless/ath/ath12k/mhi.h index 9362ad1958c3..7358b8477536 100644 --- a/drivers/net/wireless/ath/ath12k/mhi.h +++ b/drivers/net/wireless/ath/ath12k/mhi.h @@ -43,5 +43,5 @@ void ath12k_mhi_clear_vector(struct ath12k_base *ab); void ath12k_mhi_suspend(struct ath12k_pci *ar_pci); void ath12k_mhi_resume(struct ath12k_pci *ar_pci); - +void ath12k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic); #endif diff --git a/drivers/net/wireless/ath/ath12k/p2p.c 
b/drivers/net/wireless/ath/ath12k/p2p.c index 3a851ee15b2f..84cccf7d91e7 100644 --- a/drivers/net/wireless/ath/ath12k/p2p.c +++ b/drivers/net/wireless/ath/ath12k/p2p.c @@ -69,20 +69,20 @@ static size_t ath12k_p2p_noa_ie_len_compute(const struct ath12k_wmi_p2p_noa_info return len; } -static void ath12k_p2p_noa_ie_assign(struct ath12k_vif *arvif, void *ie, +static void ath12k_p2p_noa_ie_assign(struct ath12k_link_vif *arvif, void *ie, size_t len) { struct ath12k *ar = arvif->ar; lockdep_assert_held(&ar->data_lock); - kfree(arvif->u.ap.noa_data); + kfree(arvif->ahvif->u.ap.noa_data); - arvif->u.ap.noa_data = ie; - arvif->u.ap.noa_len = len; + arvif->ahvif->u.ap.noa_data = ie; + arvif->ahvif->u.ap.noa_len = len; } -static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif, +static void __ath12k_p2p_noa_update(struct ath12k_link_vif *arvif, const struct ath12k_wmi_p2p_noa_info *noa) { struct ath12k *ar = arvif->ar; @@ -105,7 +105,7 @@ static void __ath12k_p2p_noa_update(struct ath12k_vif *arvif, ath12k_p2p_noa_ie_assign(arvif, ie, len); } -void ath12k_p2p_noa_update(struct ath12k_vif *arvif, +void ath12k_p2p_noa_update(struct ath12k_link_vif *arvif, const struct ath12k_wmi_p2p_noa_info *noa) { struct ath12k *ar = arvif->ar; @@ -118,9 +118,12 @@ void ath12k_p2p_noa_update(struct ath12k_vif *arvif, static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); struct ath12k_p2p_noa_arg *arg = data; + struct ath12k_link_vif *arvif; + WARN_ON(!rcu_read_lock_any_held()); + arvif = &ahvif->deflink; if (arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id) return; diff --git a/drivers/net/wireless/ath/ath12k/p2p.h b/drivers/net/wireless/ath/ath12k/p2p.h index b2eec51a9900..03ee877e6d6b 100644 --- a/drivers/net/wireless/ath/ath12k/p2p.h +++ b/drivers/net/wireless/ath/ath12k/p2p.h @@ -16,7 +16,7 @@ struct ath12k_p2p_noa_arg { const struct ath12k_wmi_p2p_noa_info *noa; }; -void ath12k_p2p_noa_update(struct ath12k_vif *arvif, +void ath12k_p2p_noa_update(struct ath12k_link_vif *arvif, const struct ath12k_wmi_p2p_noa_info *noa); void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id, const struct ath12k_wmi_p2p_noa_info *noa); diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index bd269aa1740b..cf907550e6a4 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -7,6 +7,8 @@ #include <linux/module.h> #include <linux/msi.h> #include <linux/pci.h> +#include <linux/time.h> +#include <linux/vmalloc.h> #include "pci.h" #include "core.h" @@ -425,9 +427,9 @@ static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab) } } -static void ath12k_pci_ce_tasklet(struct tasklet_struct *t) +static void ath12k_pci_ce_workqueue(struct work_struct *work) { - struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); + struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq); int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); @@ -449,7 +451,7 @@ static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg) disable_irq_nosync(ab->irq_num[irq_idx]); - tasklet_schedule(&ce_pipe->intr_tq); + queue_work(system_bh_wq, &ce_pipe->intr_wq); return IRQ_HANDLED; } @@ -675,7 +677,7 @@ static int ath12k_pci_config_irq(struct ath12k_base *ab) irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; - 
tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet); + INIT_WORK(&ce_pipe->intr_wq, ath12k_pci_ce_workqueue); ret = request_irq(irq, ath12k_pci_ce_interrupt_handler, ab_pci->irq_flags, irq_name[irq_idx], @@ -962,7 +964,7 @@ static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci) PCI_EXP_LNKCTL_ASPMC); } -static void ath12k_pci_kill_tasklets(struct ath12k_base *ab) +static void ath12k_pci_cancel_workqueue(struct ath12k_base *ab) { int i; @@ -972,7 +974,7 @@ static void ath12k_pci_kill_tasklets(struct ath12k_base *ab) if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) continue; - tasklet_kill(&ce_pipe->intr_tq); + cancel_work_sync(&ce_pipe->intr_wq); } } @@ -980,7 +982,7 @@ static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab) { ath12k_pci_ce_irqs_disable(ab); ath12k_pci_sync_ce_irqs(ab); - ath12k_pci_kill_tasklets(ab); + ath12k_pci_cancel_workqueue(ab); } int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, @@ -1259,6 +1261,186 @@ void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value) ab_pci->pci_ops->release(ab); } +#ifdef CONFIG_ATH12K_COREDUMP +static int ath12k_pci_coredump_calculate_size(struct ath12k_base *ab, u32 *dump_seg_sz) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; + struct image_info *rddm_img, *fw_img; + struct ath12k_tlv_dump_data *dump_tlv; + enum ath12k_fw_crash_dump_type mem_type; + u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0; + struct ath12k_dump_file_data *file_data; + int i; + + rddm_img = mhi_ctrl->rddm_image; + if (!rddm_img) { + ath12k_err(ab, "No RDDM dump found\n"); + return 0; + } + + fw_img = mhi_ctrl->fbc_image; + + for (i = 0; i < fw_img->entries ; i++) { + if (!fw_img->mhi_buf[i].buf) + continue; + + paging_tlv_sz += fw_img->mhi_buf[i].len; + } + dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz; + + for (i = 0; i < rddm_img->entries; i++) { + if (!rddm_img->mhi_buf[i].buf) + continue; + + rddm_tlv_sz += rddm_img->mhi_buf[i].len; + } + dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz; + + for (i = 0; i < ab->qmi.mem_seg_count; i++) { + mem_type = ath12k_coredump_get_dump_type(ab->qmi.target_mem[i].type); + + if (mem_type == FW_CRASH_DUMP_NONE) + continue; + + if (mem_type == FW_CRASH_DUMP_TYPE_MAX) { + ath12k_dbg(ab, ATH12K_DBG_PCI, + "target mem region type %d not supported", + ab->qmi.target_mem[i].type); + continue; + } + + if (!ab->qmi.target_mem[i].paddr) + continue; + + dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size; + } + + for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) { + if (!dump_seg_sz[i]) + continue; + + len += sizeof(*dump_tlv) + dump_seg_sz[i]; + } + + if (len) + len += sizeof(*file_data); + + return len; +} + +static void ath12k_pci_coredump_download(struct ath12k_base *ab) +{ + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); + struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; + struct image_info *rddm_img, *fw_img; + struct timespec64 timestamp; + int i, len, mem_idx; + enum ath12k_fw_crash_dump_type mem_type; + struct ath12k_dump_file_data *file_data; + struct ath12k_tlv_dump_data *dump_tlv; + size_t hdr_len = sizeof(*file_data); + void *buf; + u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 }; + + ath12k_mhi_coredump(mhi_ctrl, false); + + len = ath12k_pci_coredump_calculate_size(ab, dump_seg_sz); + if (!len) { + ath12k_warn(ab, "No crash dump data found for devcoredump"); + return; + } + + rddm_img = mhi_ctrl->rddm_image; + fw_img = mhi_ctrl->fbc_image; + + /* dev_coredumpv() requires vmalloc data 
*/ + buf = vzalloc(len); + if (!buf) + return; + + ab->dump_data = buf; + ab->ath12k_coredump_len = len; + file_data = ab->dump_data; + strscpy(file_data->df_magic, "ATH12K-FW-DUMP", sizeof(file_data->df_magic)); + file_data->len = cpu_to_le32(len); + file_data->version = cpu_to_le32(ATH12K_FW_CRASH_DUMP_V2); + file_data->chip_id = cpu_to_le32(ab_pci->dev_id); + file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id); + file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus)); + guid_gen(&file_data->guid); + ktime_get_real_ts64(&timestamp); + file_data->tv_sec = cpu_to_le64(timestamp.tv_sec); + file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec); + buf += hdr_len; + dump_tlv = buf; + dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA); + dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]); + buf += COREDUMP_TLV_HDR_SIZE; + + /* append all segments together as they are all part of a single contiguous + * block of memory + */ + for (i = 0; i < fw_img->entries ; i++) { + if (!fw_img->mhi_buf[i].buf) + continue; + + memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf, + fw_img->mhi_buf[i].len); + buf += fw_img->mhi_buf[i].len; + } + + dump_tlv = buf; + dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA); + dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]); + buf += COREDUMP_TLV_HDR_SIZE; + + /* append all segments together as they are all part of a single contiguous + * block of memory + */ + for (i = 0; i < rddm_img->entries; i++) { + if (!rddm_img->mhi_buf[i].buf) + continue; + + memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf, + rddm_img->mhi_buf[i].len); + buf += rddm_img->mhi_buf[i].len; + } + + mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA; + for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) { + if (!dump_seg_sz[mem_idx] || mem_idx == FW_CRASH_DUMP_NONE) + continue; + + dump_tlv = buf; + dump_tlv->type = cpu_to_le32(mem_idx); + dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]); + buf += COREDUMP_TLV_HDR_SIZE; + + for (i = 0; i < ab->qmi.mem_seg_count; i++) { + mem_type = ath12k_coredump_get_dump_type + (ab->qmi.target_mem[i].type); + + if (mem_type != mem_idx) + continue; + + if (!ab->qmi.target_mem[i].paddr) { + ath12k_dbg(ab, ATH12K_DBG_PCI, + "Skipping mem region type %d", + ab->qmi.target_mem[i].type); + continue; + } + + memcpy_fromio(buf, ab->qmi.target_mem[i].v.ioaddr, + ab->qmi.target_mem[i].size); + buf += ab->qmi.target_mem[i].size; + } + } + + queue_work(ab->workqueue, &ab->dump_work); +} +#endif + int ath12k_pci_power_up(struct ath12k_base *ab) { struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); @@ -1329,6 +1511,9 @@ static const struct ath12k_hif_ops ath12k_pci_hif_ops = { .ce_irq_disable = ath12k_pci_hif_ce_irq_disable, .get_ce_msi_idx = ath12k_pci_get_ce_msi_idx, .panic_handler = ath12k_pci_panic_handler, +#ifdef CONFIG_ATH12K_COREDUMP + .coredump_download = ath12k_pci_coredump_download, +#endif }; static @@ -1538,6 +1723,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev) set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags); cancel_work_sync(&ab->reset_work); + cancel_work_sync(&ab->dump_work); ath12k_core_deinit(ab); qmi_fail: diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c index 19c0626fbff1..7a62665b8af9 100644 --- a/drivers/net/wireless/ath/ath12k/peer.c +++ b/drivers/net/wireless/ath/ath12k/peer.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" @@ -186,7 +186,7 @@ void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id) struct ath12k_peer *peer, *tmp; struct ath12k_base *ab = ar->ab; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); spin_lock_bh(&ab->base_lock); list_for_each_entry_safe(peer, tmp, &ab->peers, list) { @@ -235,7 +235,7 @@ int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr) { int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); reinit_completion(&ar->peer_delete_done); @@ -261,14 +261,15 @@ static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true); } -int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif, +int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif, struct ieee80211_sta *sta, struct ath12k_wmi_peer_create_arg *arg) { + struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); struct ath12k_peer *peer; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); if (ar->num_peers > (ar->max_num_peers - 1)) { ath12k_warn(ar->ab, @@ -326,7 +327,7 @@ int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif, peer->pdev_idx = ar->pdev_idx; peer->sta = sta; - if (arvif->vif->type == NL80211_IFTYPE_STATION) { + if (vif->type == NL80211_IFTYPE_STATION) { arvif->ast_hash = peer->ast_hash; arvif->ast_idx = peer->hw_peer_id; } diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h index 7b3500b5c8c2..b955f0cdf598 100644 --- a/drivers/net/wireless/ath/ath12k/peer.h +++ b/drivers/net/wireless/ath/ath12k/peer.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_PEER_H @@ -59,7 +59,7 @@ struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab, struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id); void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id); int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr); -int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif, +int ath12k_peer_create(struct ath12k *ar, struct ath12k_link_vif *arvif, struct ieee80211_sta *sta, struct ath12k_wmi_peer_create_arg *arg); int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id, diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h index a0db6702a189..10366bbe9999 100644 --- a/drivers/net/wireless/ath/ath12k/rx_desc.h +++ b/drivers/net/wireless/ath/ath12k/rx_desc.h @@ -684,18 +684,17 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO5_SA_IDX_TIMEOUT BIT(0) #define RX_MSDU_END_INFO5_DA_IDX_TIMEOUT BIT(1) +#define RX_MSDU_END_INFO5_TO_DS BIT(2) +#define RX_MSDU_END_INFO5_TID GENMASK(6, 3) #define RX_MSDU_END_INFO5_SA_IS_VALID BIT(7) #define RX_MSDU_END_INFO5_DA_IS_VALID BIT(8) #define RX_MSDU_END_INFO5_DA_IS_MCBC BIT(9) #define RX_MSDU_END_INFO5_L3_HDR_PADDING GENMASK(11, 10) #define RX_MSDU_END_INFO5_FIRST_MSDU BIT(12) #define RX_MSDU_END_INFO5_LAST_MSDU BIT(13) +#define RX_MSDU_END_INFO5_FROM_DS BIT(14) #define RX_MSDU_END_INFO5_IP_CHKSUM_FAIL_COPY BIT(15) -#define RX_MSDU_END_QCN9274_INFO5_TO_DS BIT(2) -#define RX_MSDU_END_QCN9274_INFO5_TID GENMASK(6, 3) -#define RX_MSDU_END_QCN9274_INFO5_FROM_DS BIT(14) - #define RX_MSDU_END_INFO6_MSDU_DROP BIT(0) #define RX_MSDU_END_INFO6_REO_DEST_IND GENMASK(5, 1) #define RX_MSDU_END_INFO6_FLOW_IDX GENMASK(25, 6) @@ -709,14 +708,14 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO7_FLOW_AGGR_CONTN BIT(8) #define RX_MSDU_END_INFO7_FISA_TIMEOUT BIT(9) -#define RX_MSDU_END_QCN9274_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10) -#define RX_MSDU_END_QCN9274_INFO7_MSDU_LIMIT_ERROR BIT(11) -#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_TIMEOUT BIT(12) -#define RX_MSDU_END_QCN9274_INFO7_FLOW_IDX_INVALID BIT(13) -#define RX_MSDU_END_QCN9274_INFO7_CCE_MATCH BIT(14) -#define RX_MSDU_END_QCN9274_INFO7_AMSDU_PARSER_ERR BIT(15) +#define RX_MSDU_END_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10) +#define RX_MSDU_END_INFO7_MSDU_LIMIT_ERROR BIT(11) +#define RX_MSDU_END_INFO7_FLOW_IDX_TIMEOUT BIT(12) +#define RX_MSDU_END_INFO7_FLOW_IDX_INVALID BIT(13) +#define RX_MSDU_END_INFO7_CCE_MATCH BIT(14) +#define RX_MSDU_END_INFO7_AMSDU_PARSER_ERR BIT(15) -#define RX_MSDU_END_QCN9274_INFO8_KEY_ID GENMASK(7, 0) +#define RX_MSDU_END_INFO8_KEY_ID GENMASK(7, 0) #define RX_MSDU_END_INFO9_SERVICE_CODE GENMASK(14, 6) #define RX_MSDU_END_INFO9_PRIORITY_VALID BIT(15) @@ -758,8 +757,8 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO12_RECV_BW GENMASK(20, 18) #define RX_MSDU_END_INFO12_RECEPTION_TYPE GENMASK(23, 21) -#define RX_MSDU_END_QCN9274_INFO12_MIMO_SS_BITMAP GENMASK(30, 24) -#define RX_MSDU_END_QCN9274_INFO12_MIMO_DONE_COPY BIT(31) +#define RX_MSDU_END_INFO12_MIMO_SS_BITMAP GENMASK(30, 24) +#define RX_MSDU_END_INFO12_MIMO_DONE_COPY BIT(31) #define RX_MSDU_END_INFO13_FIRST_MPDU BIT(0) #define RX_MSDU_END_INFO13_MCAST_BCAST BIT(2) @@ -791,7 +790,7 @@ enum rx_msdu_start_reception_type { #define RX_MSDU_END_INFO13_UNDECRYPT_FRAME_ERR BIT(30) #define RX_MSDU_END_INFO13_FCS_ERR BIT(31) -#define RX_MSDU_END_QCN9274_INFO13_WIFI_PARSER_ERR BIT(15) +#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15) #define 
RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE GENMASK(12, 10) #define RX_MSDU_END_INFO14_RX_BITMAP_NOT_UPDED BIT(13) @@ -889,65 +888,6 @@ struct rx_msdu_end_qcn9274_compact { __le32 info14; } __packed; -/* These macro definitions are only used for WCN7850 */ -#define RX_MSDU_END_WCN7850_INFO2_KEY_ID BIT(7, 0) - -#define RX_MSDU_END_WCN7850_INFO5_MSDU_LIMIT_ERR BIT(2) -#define RX_MSDU_END_WCN7850_INFO5_IDX_TIMEOUT BIT(3) -#define RX_MSDU_END_WCN7850_INFO5_IDX_INVALID BIT(4) -#define RX_MSDU_END_WCN7850_INFO5_WIFI_PARSE_ERR BIT(5) -#define RX_MSDU_END_WCN7850_INFO5_AMSDU_PARSER_ERR BIT(6) -#define RX_MSDU_END_WCN7850_INFO5_TCPUDP_CSUM_FAIL_CPY BIT(14) - -#define RX_MSDU_END_WCN7850_INFO12_MIMO_SS_BITMAP GENMASK(31, 24) - -#define RX_MSDU_END_WCN7850_INFO13_FRAGMENT_FLAG BIT(13) -#define RX_MSDU_END_WCN7850_INFO13_CCE_MATCH BIT(15) - -struct rx_msdu_end_wcn7850 { - __le16 info0; - __le16 phy_ppdu_id; - __le16 ip_hdr_cksum; - __le16 info1; - __le16 info2; - __le16 cumulative_l3_checksum; - __le32 rule_indication0; - __le32 rule_indication1; - __le16 info3; - __le16 l3_type; - __le32 ipv6_options_crc; - __le32 tcp_seq_num; - __le32 tcp_ack_num; - __le16 info4; - __le16 window_size; - __le16 tcp_udp_chksum; - __le16 info5; - __le16 sa_idx; - __le16 da_idx_or_sw_peer_id; - __le32 info6; - __le32 fse_metadata; - __le16 cce_metadata; - __le16 sa_sw_peer_id; - __le16 info7; - __le16 rsvd0; - __le16 cumulative_l4_checksum; - __le16 cumulative_ip_length; - __le32 info9; - __le32 info10; - __le32 info11; - __le32 toeplitz_hash_2_or_4; - __le32 flow_id_toeplitz; - __le32 info12; - __le32 ppdu_start_timestamp_31_0; - __le32 ppdu_start_timestamp_63_32; - __le32 phy_meta_data; - __le16 vlan_ctag_ci; - __le16 vlan_stag_ci; - __le32 rsvd[3]; - __le32 info13; - __le32 info14; -} __packed; - /* rx_msdu_end * * rxpcu_mpdu_filter_in_category @@ -1578,7 +1518,7 @@ struct rx_pkt_hdr_tlv { struct hal_rx_desc_wcn7850 { __le64 msdu_end_tag; - struct rx_msdu_end_wcn7850 msdu_end; + struct rx_msdu_end_qcn9274 msdu_end; u8 rx_padding0[RX_BE_PADDING0_BYTES]; __le64 mpdu_start_tag; struct rx_mpdu_start_qcn9274 mpdu_start; diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 2cd3ff9b0164..dced2aa9ba1a 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -6687,7 +6687,8 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab, const u32 *vdev_ids) { int i; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; + struct ath12k_vif *ahvif; /* Finish CSA once the switch count becomes NULL */ if (ev->current_switch_count) @@ -6702,9 +6703,10 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab, vdev_ids[i]); continue; } + ahvif = arvif->ahvif; - if (arvif->is_up && arvif->vif->bss_conf.csa_active) - ieee80211_csa_finish(arvif->vif, 0); + if (arvif->is_up && ahvif->vif->bss_conf.csa_active) + ieee80211_csa_finish(ahvif->vif, 0); } rcu_read_unlock(); } @@ -7098,7 +7100,7 @@ static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab, struct sk_buff *skb) { const struct wmi_gtk_offload_status_event *ev; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; __be64 replay_ctr_be; u64 replay_ctr; const void **tb; @@ -7136,7 +7138,7 @@ static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab, /* supplicant expects big-endian replay counter */ replay_ctr_be = cpu_to_be64(replay_ctr); - ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid, + ieee80211_gtk_rekey_notify(arvif->ahvif->vif, 
arvif->bssid, (void *)&replay_ctr_be, GFP_ATOMIC); rcu_read_unlock(); @@ -7284,9 +7286,11 @@ static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab, u32 pdev_idx) { int status; - u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL, - ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, - ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 }; + static const u32 svc_id[] = { + ATH12K_HTC_SVC_ID_WMI_CONTROL, + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1, + ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 + }; struct ath12k_htc_svc_conn_req conn_req = {}; struct ath12k_htc_svc_conn_resp conn_resp = {}; @@ -7372,13 +7376,13 @@ ath12k_wmi_send_unit_test_cmd(struct ath12k *ar, int ath12k_wmi_simulate_radar(struct ath12k *ar) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; u32 dfs_args[DFS_MAX_TEST_ARGS]; struct wmi_unit_test_cmd wmi_ut; bool arvif_found = false; list_for_each_entry(arvif, &ar->arvifs, list) { - if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) { + if (arvif->is_started && arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP) { arvif_found = true; break; } @@ -7940,7 +7944,7 @@ static void ath12k_wmi_fill_arp_offload(struct ath12k *ar, } int ath12k_wmi_arp_ns_offload(struct ath12k *ar, - struct ath12k_vif *arvif, + struct ath12k_link_vif *arvif, struct wmi_arp_ns_offload_arg *offload, bool enable) { @@ -7989,7 +7993,7 @@ int ath12k_wmi_arp_ns_offload(struct ath12k *ar, } int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, - struct ath12k_vif *arvif, bool enable) + struct ath12k_link_vif *arvif, bool enable) { struct ath12k_rekey_data *rekey_data = &arvif->rekey_data; struct wmi_gtk_rekey_offload_cmd *cmd; @@ -8026,7 +8030,7 @@ int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, } int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, - struct ath12k_vif *arvif) + struct ath12k_link_vif *arvif) { struct wmi_gtk_rekey_offload_cmd *cmd; struct sk_buff *skb; diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index 6a913f9b8315..6f55dbdf629d 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -24,7 +24,7 @@ struct ath12k_base; struct ath12k; -struct ath12k_vif; +struct ath12k_link_vif; /* There is no signed version of __le32, so for a temporary solution come * up with our own version. The idea is from fs/ntfs/endian.h. 
@@ -5627,13 +5627,13 @@ int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id, int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg); int ath12k_wmi_arp_ns_offload(struct ath12k *ar, - struct ath12k_vif *arvif, + struct ath12k_link_vif *arvif, struct wmi_arp_ns_offload_arg *offload, bool enable); int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, - struct ath12k_vif *arvif, bool enable); + struct ath12k_link_vif *arvif, bool enable); int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, - struct ath12k_vif *arvif); + struct ath12k_link_vif *arvif); int ath12k_wmi_sta_keepalive(struct ath12k *ar, const struct wmi_sta_keepalive_arg *arg); diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c index 9b8684abbe40..9e1c0bfd212f 100644 --- a/drivers/net/wireless/ath/ath12k/wow.c +++ b/drivers/net/wireless/ath/ath12k/wow.c @@ -29,11 +29,11 @@ static const struct wiphy_wowlan_support ath12k_wowlan_support = { .max_pkt_offset = WOW_MAX_PKT_OFFSET, }; -static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *arvif) +static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *ahvif) { - return (arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE || - arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT || - arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO); + return (ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE || + ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT || + ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO); } int ath12k_wow_enable(struct ath12k *ar) @@ -101,7 +101,7 @@ int ath12k_wow_wakeup(struct ath12k *ar) return 0; } -static int ath12k_wow_vif_cleanup(struct ath12k_vif *arvif) +static int ath12k_wow_vif_cleanup(struct ath12k_link_vif *arvif) { struct ath12k *ar = arvif->ar; int i, ret; @@ -129,10 +129,10 @@ static int ath12k_wow_vif_cleanup(struct ath12k_vif *arvif) static int ath12k_wow_cleanup(struct ath12k *ar) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); list_for_each_entry(arvif, &ar->arvifs, list) { ret = ath12k_wow_vif_cleanup(arvif); @@ -191,7 +191,7 @@ ath12k_wow_convert_8023_to_80211(struct ath12k *ar, memcpy(bytemask, eth_bytemask, eth_pat_len); pat_len = eth_pat_len; - } else if (eth_pkt_ofs + eth_pat_len < prot_ofs) { + } else if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) { memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs); memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs); @@ -354,7 +354,7 @@ ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id, return 0; } -static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif, +static int ath12k_wow_vif_set_wakeups(struct ath12k_link_vif *arvif, struct cfg80211_wowlan *wowlan) { const struct cfg80211_pkt_pattern *patterns = wowlan->patterns; @@ -364,7 +364,7 @@ static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif, int ret, i, j; /* Setup requested WOW features */ - switch (arvif->vdev_type) { + switch (arvif->ahvif->vdev_type) { case WMI_VDEV_TYPE_IBSS: __set_bit(WOW_BEACON_EVENT, &wow_mask); fallthrough; @@ -473,13 +473,13 @@ static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif, static int ath12k_wow_set_wakeups(struct ath12k *ar, struct cfg80211_wowlan *wowlan) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); list_for_each_entry(arvif, &ar->arvifs, list) { - if 
(ath12k_wow_is_p2p_vdev(arvif)) + if (ath12k_wow_is_p2p_vdev(arvif->ahvif)) continue; ret = ath12k_wow_vif_set_wakeups(arvif, wowlan); if (ret) { @@ -518,11 +518,11 @@ out: return ret; } -static int ath12k_wow_vif_clean_nlo(struct ath12k_vif *arvif) +static int ath12k_wow_vif_clean_nlo(struct ath12k_link_vif *arvif) { struct ath12k *ar = arvif->ar; - switch (arvif->vdev_type) { + switch (arvif->ahvif->vdev_type) { case WMI_VDEV_TYPE_STA: return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id); default: @@ -532,13 +532,13 @@ static int ath12k_wow_vif_clean_nlo(struct ath12k_vif *arvif) static int ath12k_wow_nlo_cleanup(struct ath12k *ar) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); list_for_each_entry(arvif, &ar->arvifs, list) { - if (ath12k_wow_is_p2p_vdev(arvif)) + if (ath12k_wow_is_p2p_vdev(arvif->ahvif)) continue; ret = ath12k_wow_vif_clean_nlo(arvif); @@ -555,13 +555,13 @@ static int ath12k_wow_nlo_cleanup(struct ath12k *ar) static int ath12k_wow_set_hw_filter(struct ath12k *ar) { struct wmi_hw_data_filter_arg arg; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); list_for_each_entry(arvif, &ar->arvifs, list) { - if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA) continue; arg.vdev_id = arvif->vdev_id; @@ -581,13 +581,13 @@ static int ath12k_wow_set_hw_filter(struct ath12k *ar) static int ath12k_wow_clear_hw_filter(struct ath12k *ar) { struct wmi_hw_data_filter_arg arg; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); list_for_each_entry(arvif, &ar->arvifs, list) { - if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA) continue; arg.vdev_id = arvif->vdev_id; @@ -626,10 +626,10 @@ static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab, } } -static void ath12k_wow_prepare_ns_offload(struct ath12k_vif *arvif, +static void ath12k_wow_prepare_ns_offload(struct ath12k_link_vif *arvif, struct wmi_arp_ns_offload_arg *offload) { - struct net_device *ndev = ieee80211_vif_to_wdev(arvif->vif)->netdev; + struct net_device *ndev = ieee80211_vif_to_wdev(arvif->ahvif->vif)->netdev; struct ath12k_base *ab = arvif->ar->ab; struct inet6_ifaddr *ifa6; struct ifacaddr6 *ifaca6; @@ -710,10 +710,10 @@ unlock: ath12k_wow_generate_ns_mc_addr(ab, offload); } -static void ath12k_wow_prepare_arp_offload(struct ath12k_vif *arvif, +static void ath12k_wow_prepare_arp_offload(struct ath12k_link_vif *arvif, struct wmi_arp_ns_offload_arg *offload) { - struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_vif *vif = arvif->ahvif->vif; struct ieee80211_vif_cfg vif_cfg = vif->cfg; struct ath12k_base *ab = arvif->ar->ab; u32 ipv4_cnt; @@ -732,22 +732,25 @@ static void ath12k_wow_prepare_arp_offload(struct ath12k_vif *arvif, static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable) { struct wmi_arp_ns_offload_arg *offload; - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; + struct ath12k_vif *ahvif; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); offload = kmalloc(sizeof(*offload), GFP_KERNEL); if (!offload) return -ENOMEM; list_for_each_entry(arvif, &ar->arvifs, list) { - if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 
+ ahvif = arvif->ahvif; + + if (ahvif->vdev_type != WMI_VDEV_TYPE_STA) continue; memset(offload, 0, sizeof(*offload)); - memcpy(offload->mac_addr, arvif->vif->addr, ETH_ALEN); + memcpy(offload->mac_addr, ahvif->vif->addr, ETH_ALEN); ath12k_wow_prepare_ns_offload(arvif, offload); ath12k_wow_prepare_arp_offload(arvif, offload); @@ -766,13 +769,13 @@ static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable) static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); list_for_each_entry(arvif, &ar->arvifs, list) { - if (arvif->vdev_type != WMI_VDEV_TYPE_STA || + if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA || !arvif->is_up || !arvif->rekey_data.enable_offload) continue; @@ -824,10 +827,10 @@ static int ath12k_wow_set_keepalive(struct ath12k *ar, enum wmi_sta_keepalive_method method, u32 interval) { - struct ath12k_vif *arvif; + struct ath12k_link_vif *arvif; int ret; - lockdep_assert_held(&ar->conf_mutex); + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); list_for_each_entry(arvif, &ar->arvifs, list) { ret = ath12k_mac_vif_set_keepalive(arvif, method, interval); @@ -845,7 +848,7 @@ int ath12k_wow_op_suspend(struct ieee80211_hw *hw, struct ath12k *ar = ath12k_ah_to_ar(ah, 0); int ret; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); ret = ath12k_wow_cleanup(ar); if (ret) { @@ -913,7 +916,6 @@ cleanup: ath12k_wow_cleanup(ar); exit: - mutex_unlock(&ar->conf_mutex); return ret ? 1 : 0; } @@ -922,9 +924,9 @@ void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled) struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar = ath12k_ah_to_ar(ah, 0); - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); + device_set_wakeup_enable(ar->ab->dev, enabled); - mutex_unlock(&ar->conf_mutex); } int ath12k_wow_op_resume(struct ieee80211_hw *hw) @@ -933,7 +935,7 @@ int ath12k_wow_op_resume(struct ieee80211_hw *hw) struct ath12k *ar = ath12k_ah_to_ar(ah, 0); int ret; - mutex_lock(&ar->conf_mutex); + lockdep_assert_wiphy(hw->wiphy); ret = ath12k_hif_resume(ar->ab); if (ret) { @@ -995,7 +997,6 @@ exit: } } - mutex_unlock(&ar->conf_mutex); return ret; } diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index f27308ccb2f1..cb3e891ee1bd 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -218,10 +218,10 @@ static void ath_ahb_remove(struct platform_device *pdev) } static struct platform_driver ath_ahb_driver = { - .probe = ath_ahb_probe, - .remove_new = ath_ahb_remove, - .driver = { - .name = "ar231x-wmac", + .probe = ath_ahb_probe, + .remove = ath_ahb_remove, + .driver = { + .name = "ar231x-wmac", }, }; diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index b51fce5ae260..f5ca2fe0d074 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -46,6 +46,8 @@ static const struct pci_device_id ath5k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */ { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */ { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */ + { PCI_VDEVICE(ATHEROS, 0xff16) }, /* Gigaset SX76[23] AR241[34]A */ + { PCI_VDEVICE(ATHEROS, 0xff1a) }, /* Arcadyan ARV45XX AR2417 */ { PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */ { 0 } }; diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h 
b/drivers/net/wireless/ath/ath6kl/wmi.h index b4fcfb72991c..68384159870b 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.h +++ b/drivers/net/wireless/ath/ath6kl/wmi.h @@ -1249,7 +1249,7 @@ struct wmi_rssi_threshold_params_cmd { /* highest of upper */ a_sle16 thresh_above6_val; - /* lowest of bellow */ + /* lowest of below */ a_sle16 thresh_below1_val; a_sle16 thresh_below2_val; @@ -1257,7 +1257,7 @@ struct wmi_rssi_threshold_params_cmd { a_sle16 thresh_below4_val; a_sle16 thresh_below5_val; - /* highest of bellow */ + /* highest of below */ a_sle16 thresh_below6_val; /* "alpha" */ @@ -1287,13 +1287,13 @@ struct wmi_snr_threshold_params_cmd { /* highest of upper */ u8 thresh_above4_val; - /* lowest of bellow */ + /* lowest of below */ u8 thresh_below1_val; u8 thresh_below2_val; u8 thresh_below3_val; - /* highest of bellow */ + /* highest of below */ u8 thresh_below4_val; u8 reserved[3]; diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index 1a6697b6e3b4..d4805e02b927 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -158,12 +158,12 @@ static void ath_ahb_remove(struct platform_device *pdev) } static struct platform_driver ath_ahb_driver = { - .probe = ath_ahb_probe, - .remove_new = ath_ahb_remove, - .driver = { - .name = "ath9k", + .probe = ath_ahb_probe, + .remove = ath_ahb_remove, + .driver = { + .name = "ath9k", }, - .id_table = ath9k_platform_id_table, + .id_table = ath9k_platform_id_table, }; MODULE_DEVICE_TABLE(platform, ath9k_platform_id_table); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c index d0f1e8bcc846..45a7ca660f47 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_aic.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c @@ -409,13 +409,11 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah) sram.valid = true; sram.rot_dir_att_db = - min(max(rot_dir_path_att_db, - (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB), - ATH_AIC_MAX_ROT_DIR_ATT_DB); + clamp(rot_dir_path_att_db, (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB, + ATH_AIC_MAX_ROT_DIR_ATT_DB); sram.rot_quad_att_db = - min(max(rot_quad_path_att_db, - (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB), - ATH_AIC_MAX_ROT_QUAD_ATT_DB); + clamp(rot_quad_path_att_db, (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB, + ATH_AIC_MAX_ROT_QUAD_ATT_DB); aic->aic_sram[i] = (SM(sram.vga_dir_sign, AR_PHY_AIC_SRAM_VGA_DIR_SIGN) | diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c index 004ca5f536be..fe1013a3a588 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c +++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c @@ -18,7 +18,6 @@ #include <linux/pci.h> #include <linux/delay.h> #include <linux/platform_device.h> -#include <linux/ath9k_platform.h> #include <linux/nvmem-consumer.h> #include <linux/workqueue.h> diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index 9b393a8f7c3a..ad3a3fda1b9c 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -16,7 +16,6 @@ #include <linux/export.h> #include <linux/types.h> -#include <linux/ath9k_platform.h> #include "hw.h" enum ath_bt_mode { @@ -115,23 +114,14 @@ static void ath9k_hw_btcoex_pin_init(struct ath_hw *ah, u8 wlanactive_gpio, u8 btactive_gpio, u8 btpriority_gpio) { struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; - struct ath9k_platform_data *pdata = ah->dev->platform_data; if (btcoex_hw->scheme != 
ATH_BTCOEX_CFG_2WIRE && btcoex_hw->scheme != ATH_BTCOEX_CFG_3WIRE) return; - /* bt priority GPIO will be ignored by 2 wire scheme */ - if (pdata && (pdata->bt_active_pin || pdata->bt_priority_pin || - pdata->wlan_active_pin)) { - btcoex_hw->btactive_gpio = pdata->bt_active_pin; - btcoex_hw->wlanactive_gpio = pdata->wlan_active_pin; - btcoex_hw->btpriority_gpio = pdata->bt_priority_pin; - } else { - btcoex_hw->btactive_gpio = btactive_gpio; - btcoex_hw->wlanactive_gpio = wlanactive_gpio; - btcoex_hw->btpriority_gpio = btpriority_gpio; - } + btcoex_hw->btactive_gpio = btactive_gpio; + btcoex_hw->wlanactive_gpio = wlanactive_gpio; + btcoex_hw->btpriority_gpio = btpriority_gpio; } void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index efb7889142d4..df58dc02e104 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -15,7 +15,6 @@ */ #include "hw.h" -#include <linux/ath9k_platform.h> void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val) { @@ -119,14 +118,6 @@ static bool ath9k_hw_nvram_read_array(u16 *blob, size_t blob_size, return true; } -static bool ath9k_hw_nvram_read_pdata(struct ath9k_platform_data *pdata, - off_t offset, u16 *data) -{ - return ath9k_hw_nvram_read_array(pdata->eeprom_data, - ARRAY_SIZE(pdata->eeprom_data), - offset, data); -} - static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob, off_t offset, u16 *data) { @@ -146,15 +137,12 @@ static bool ath9k_hw_nvram_read_nvmem(struct ath_hw *ah, off_t offset, bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data) { struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_platform_data *pdata = ah->dev->platform_data; bool ret; if (ah->nvmem_blob) ret = ath9k_hw_nvram_read_nvmem(ah, off, data); else if (ah->eeprom_blob) ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data); - else if (pdata && !pdata->use_eeprom) - ret = ath9k_hw_nvram_read_pdata(pdata, off, data); else ret = common->bus_ops->eeprom_read(common, off, data); diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index eb631fd3336d..b5257b2b4aa5 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -294,6 +294,9 @@ int htc_connect_service(struct htc_target *target, return -ETIMEDOUT; } + if (target->conn_rsp_epid < 0 || target->conn_rsp_epid >= ENDPOINT_MAX) + return -EINVAL; + *conn_rsp_epid = target->conn_rsp_epid; return 0; err: diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index c3a6368bfc68..e2bef099adb3 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -490,7 +490,7 @@ static void ath9k_hw_init_macaddr(struct ath_hw *ah) u16 eeval; static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW }; - /* MAC address may already be loaded via ath9k_platform_data */ + /* MAC address may already be loaded via NVMEM */ if (is_valid_ether_addr(common->macaddr)) return; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 7fad7e75af6a..f9e77c4624d9 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -18,7 +18,6 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> -#include <linux/ath9k_platform.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_net.h> @@ -583,8 +582,8 @@ 
static int ath9k_nvmem_request_eeprom(struct ath_softc *sc) /* nvmem cell might not be defined, or the nvmem * subsystem isn't included. In this case, follow - * the established "just return 0;" convention of - * ath9k_init_platform to say: + * the established "just return 0;" convention + * to say: * "All good. Nothing to see here. Please go on." */ if (err == -ENOENT || err == -EOPNOTSUPP) @@ -620,49 +619,6 @@ static int ath9k_nvmem_request_eeprom(struct ath_softc *sc) return 0; } -static int ath9k_init_platform(struct ath_softc *sc) -{ - struct ath9k_platform_data *pdata = sc->dev->platform_data; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - int ret; - - if (!pdata) - return 0; - - if (!pdata->use_eeprom) { - ah->ah_flags &= ~AH_USE_EEPROM; - ah->gpio_mask = pdata->gpio_mask; - ah->gpio_val = pdata->gpio_val; - ah->led_pin = pdata->led_pin; - ah->is_clk_25mhz = pdata->is_clk_25mhz; - ah->get_mac_revision = pdata->get_mac_revision; - ah->external_reset = pdata->external_reset; - ah->disable_2ghz = pdata->disable_2ghz; - ah->disable_5ghz = pdata->disable_5ghz; - - if (!pdata->endian_check) - ah->ah_flags |= AH_NO_EEP_SWAP; - } - - if (pdata->eeprom_name) { - ret = ath9k_eeprom_request(sc, pdata->eeprom_name); - if (ret) - return ret; - } - - if (pdata->led_active_high) - ah->config.led_active_high = true; - - if (pdata->tx_gain_buffalo) - ah->config.tx_gain_buffalo = true; - - if (pdata->macaddr) - ether_addr_copy(common->macaddr, pdata->macaddr); - - return 0; -} - static int ath9k_of_init(struct ath_softc *sc) { struct device_node *np = sc->dev->of_node; @@ -748,10 +704,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, */ ath9k_init_pcoem_platform(sc); - ret = ath9k_init_platform(sc); - if (ret) - return ret; - ret = ath9k_of_init(sc); if (ret) return ret; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 408776562a7e..8557d4826a46 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1679,10 +1679,10 @@ static const struct of_device_id wcn36xx_of_match[] = { MODULE_DEVICE_TABLE(of, wcn36xx_of_match); static struct platform_driver wcn36xx_driver = { - .probe = wcn36xx_probe, - .remove_new = wcn36xx_remove, - .driver = { - .name = "wcn36xx", + .probe = wcn36xx_probe, + .remove = wcn36xx_remove, + .driver = { + .name = "wcn36xx", .of_match_table = wcn36xx_of_match, }, }; diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index bccc27de848d..7ee79593cd23 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -167,7 +167,7 @@ struct wcn36xx_vif { * @dpu_desc_index: DPU descriptor index is returned from HW after config_sta * call and is used in TX BD. * @bss_sta_index: STA index is returned from HW after config_bss call and is - * used in both SMD channel and TX BD. See table bellow when it is used. + * used in both SMD channel and TX BD. See table below when it is used. * @bss_dpu_desc_index: DPU descriptor index is returned from HW after * config_bss call and is used in TX BD. 
* ______________________________________________ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 17f6b33beabd..42d991d9f8cb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -770,7 +770,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, sdiodev->settings->bus.sdio.txglomsz); - nents += (nents >> 4) + 1; + nents *= 2; WARN_ON(nents > sdiodev->max_segment_count); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 58d50918dd17..cfcf01eb0daa 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -594,7 +594,7 @@ static void brcmf_common_pd_remove(struct platform_device *pdev) } static struct platform_driver brcmf_pd = { - .remove_new = brcmf_common_pd_remove, + .remove = brcmf_common_pd_remove, .driver = { .name = BRCMFMAC_PDATA_NAME, } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index ae98e371dbfd..c1f18e2fe540 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -112,9 +112,8 @@ int brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, } strreplace(board_type, '/', '-'); settings->board_type = board_type; - - of_node_put(root); } + of_node_put(root); clk = devm_clk_get_optional_enabled_with_rate(dev, "lpo", 32768); if (IS_ERR(clk)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c index bd480239368a..80c35027787a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c @@ -1426,15 +1426,6 @@ int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub, return -ENOSPC; } -void dma_txflush(struct dma_pub *pub) -{ - struct dma_info *di = container_of(pub, struct dma_info, dma); - struct brcms_ampdu_session *session = &di->ampdu_session; - - if (!skb_queue_empty(&session->skb_list)) - ampdu_finalize(di); -} - int dma_txpending(struct dma_pub *pub) { struct dma_info *di = container_of(pub, struct dma_info, dma); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h index ff5b80b09046..7905fd081721 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.h @@ -88,7 +88,6 @@ bool dma_txreset(struct dma_pub *pub); void dma_txinit(struct dma_pub *pub); int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub, struct sk_buff *p0); -void dma_txflush(struct dma_pub *pub); int dma_txpending(struct dma_pub *pub); void dma_kick_tx(struct dma_pub *pub); void dma_txsuspend(struct dma_pub *pub); diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c index 7e41cb7bbfe0..dc4e91f58bb4 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c @@ -867,8 +867,8 @@ void libipw_rx_any(struct libipw_device *ieee, switch (ieee->iw_mode) { case IW_MODE_ADHOC: /* our BSS and not from/to DS */ - if (ether_addr_equal(hdr->addr3, ieee->bssid)) - if ((fc & 
(IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) { + if (ether_addr_equal(hdr->addr3, ieee->bssid) && + ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == 0)) { /* promisc: get all */ if (ieee->dev->flags & IFF_PROMISC) is_packet_for_us = 1; @@ -882,8 +882,8 @@ void libipw_rx_any(struct libipw_device *ieee, break; case IW_MODE_INFRA: /* our BSS (== from our AP) and from DS */ - if (ether_addr_equal(hdr->addr2, ieee->bssid)) - if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) { + if (ether_addr_equal(hdr->addr2, ieee->bssid) && + ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS)) { /* promisc: get all */ if (ieee->dev->flags & IFF_PROMISC) is_packet_for_us = 1; diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index 14d2331ee6cb..b0656b143f77 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -566,7 +566,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status); - rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; + return; } /* Convert 3945's rssi indicator to dBm */ diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index fcccde7bb659..05c4af41bdb9 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -664,7 +664,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status)); - rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; + return; } /* This will be used in several places later */ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index ea40d402681d..cd1fe8490ae5 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -13,7 +13,7 @@ #define IWL_BZ_UCODE_API_MAX 94 /* Lowest firmware API version supported */ -#define IWL_BZ_UCODE_API_MIN 90 +#define IWL_BZ_UCODE_API_MIN 92 /* NVM versions */ #define IWL_BZ_NVM_VERSION 0x0a1d diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index 635fed641c19..fc5e6e44c6aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -13,7 +13,7 @@ #define IWL_SC_UCODE_API_MAX 94 /* Lowest firmware API version supported */ -#define IWL_SC_UCODE_API_MIN 90 +#define IWL_SC_UCODE_API_MIN 92 /* NVM versions */ #define IWL_SC_NVM_VERSION 0x0a1d diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h index 2397fdc37fc5..9b942c4aabd9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h @@ -56,8 +56,6 @@ struct iwl_binding_cmd { } __packed; /* BINDING_CMD_API_S_VER_2 */ #define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1) -#define IWL_LMAC_24G_INDEX 0 -#define IWL_LMAC_5G_INDEX 1 /* The maximal number of fragments in the FW's schedule session */ #define IWL_MVM_MAX_QUOTA 128 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h index 
1fa5678c1cd6..a9fa5f054ce0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h @@ -40,4 +40,7 @@ enum iwl_ctxt_action { FW_CTXT_ACTION_REMOVE, }; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ +#define IWL_LMAC_24G_INDEX 0 +#define IWL_LMAC_5G_INDEX 1 + #endif /* __iwl_fw_api_context_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index ffee7927cf26..c2362bc786b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -368,7 +368,7 @@ enum iwl_wowlan_flags { }; /** - * struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6) + * struct iwl_wowlan_config_cmd_v6 - WoWLAN configuration (versions 5 and 6) * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters * @non_qos_seq: non-QoS sequence counter to use next. * Reserved if the struct has version >= 6. @@ -380,7 +380,7 @@ enum iwl_wowlan_flags { * @sta_id: station ID for wowlan. * @reserved: reserved */ -struct iwl_wowlan_config_cmd { +struct iwl_wowlan_config_cmd_v6 { __le32 wakeup_filter; __le16 non_qos_seq; __le16 qos_seq[8]; @@ -390,7 +390,27 @@ struct iwl_wowlan_config_cmd { u8 flags; u8 sta_id; u8 reserved; -} __packed; /* WOWLAN_CONFIG_API_S_VER_5 */ +} __packed; /* WOWLAN_CONFIG_API_S_VER_6 */ + +/** + * struct iwl_wowlan_config_cmd - WoWLAN configuration + * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters + * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down + * @is_11n_connection: indicates HT connection + * @offloading_tid: TID reserved for firmware use + * @flags: extra flags, see &enum iwl_wowlan_flags + * @sta_id: station ID for wowlan. + * @reserved: reserved + */ +struct iwl_wowlan_config_cmd { + __le32 wakeup_filter; + u8 wowlan_ba_teardown_tids; + u8 is_11n_connection; + u8 offloading_tid; + u8 flags; + u8 sta_id; + u8 reserved[3]; +} __packed; /* WOWLAN_CONFIG_API_S_VER_7 */ #define IWL_NUM_RSC 16 #define WOWLAN_KEY_MAX_SIZE 32 @@ -890,7 +910,7 @@ struct iwl_wowlan_mlo_gtk { } __packed; /* WOWLAN_MLO_GTK_KEY_API_S_VER_1 */ /** - * struct iwl_wowlan_info_notif - WoWLAN information notification + * struct iwl_wowlan_info_notif_v4 - WoWLAN information notification * @gtk: GTK data * @igtk: IGTK data * @bigtk: BIGTK data @@ -910,7 +930,7 @@ struct iwl_wowlan_mlo_gtk { * @reserved2: reserved * @mlo_gtks: array of GTKs of size num_mlo_link_keys for version >= 4 */ -struct iwl_wowlan_info_notif { +struct iwl_wowlan_info_notif_v4 { struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM]; @@ -930,6 +950,45 @@ struct iwl_wowlan_info_notif { } __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3, _VER_4 */ /** + * struct iwl_wowlan_info_notif - WoWLAN information notification + * @gtk: GTK data + * @igtk: IGTK data + * @bigtk: BIGTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched patterns + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @tid_tear_down: bit mask of tids whose BA sessions were closed + * in suspend state + * @station_id: station id + * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs 
+ * following this notif + * @tid_offloaded_tx: tid used by the firmware to transmit data packets + * while in wowlan + * @mlo_gtks: array of GTKs of size num_mlo_link_keys + */ +struct iwl_wowlan_info_notif { + struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM]; + struct iwl_wowlan_igtk_status bigtk[WOWLAN_BIGTK_KEYS_NUM]; + __le64 replay_ctr; + __le16 pattern_number; + __le16 qos_seq_ctr; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + u8 tid_tear_down; + u8 station_id; + u8 num_mlo_link_keys; + u8 tid_offloaded_tx; + struct iwl_wowlan_mlo_gtk mlo_gtks[]; +} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_5 */ + +/** * struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification * @wake_packet_length: wakeup packet length * @station_id: station id diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index 30a54c7fa001..b8dff139aa05 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -616,6 +616,9 @@ struct iwl_tof_range_req_ap_entry_v2 { * continue with the session and will provide the LMR feedback. * @IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC: send an incorrect SAC in the * first NDP exchange. This is used for testing. + * @IWL_INITIATOR_AP_FLAGS_TEST_BAD_SLTF: use incorrect secure LTF tx key. This + * is used for testing. Only supported from version 15 of the range request + * command. */ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), @@ -633,6 +636,7 @@ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_PMF = BIT(14), IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15), IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC = BIT(16), + IWL_INITIATOR_AP_FLAGS_TEST_BAD_SLTF = BIT(17), }; /** @@ -767,7 +771,7 @@ enum iwl_location_cipher { * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, - * otherwise should be set to &IWL_MVM_INVALID_STA. + * otherwise should be set to &IWL_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement @@ -814,7 +818,7 @@ struct iwl_tof_range_req_ap_entry_v6 { * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, - * otherwise should be set to &IWL_MVM_INVALID_STA. + * otherwise should be set to &IWL_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement @@ -827,10 +831,10 @@ struct iwl_tof_range_req_ap_entry_v6 { * &IWL_INITIATOR_AP_FLAGS_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. 
*/ struct iwl_tof_range_req_ap_entry_v7 { __le32 initiator_ap_flags; @@ -872,7 +876,7 @@ struct iwl_tof_range_req_ap_entry_v7 { * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, - * otherwise should be set to &IWL_MVM_INVALID_STA. + * otherwise should be set to &IWL_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement @@ -885,10 +889,10 @@ struct iwl_tof_range_req_ap_entry_v7 { * &IWL_INITIATOR_AP_FLAGS_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. * bits 0 - 2: max LTF repetitions * bits 3 - 5: max number of spatial streams @@ -946,7 +950,7 @@ struct iwl_tof_range_req_ap_entry_v8 { * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, - * otherwise should be set to &IWL_MVM_INVALID_STA. + * otherwise should be set to &IWL_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement @@ -961,10 +965,10 @@ struct iwl_tof_range_req_ap_entry_v8 { * &IWL_INITIATOR_AP_FLAGS_TB or &IWL_INITIATOR_AP_FLAGS_NON_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. * bits 0 - 2: max LTF repetitions * bits 3 - 5: max number of spatial streams @@ -1029,7 +1033,7 @@ struct iwl_tof_range_req_ap_entry_v9 { * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of * the number of measurement iterations (min 2^0 = 1, max 2^14) * @sta_id: the station id of the AP. Only relevant when associated to the AP, - * otherwise should be set to &IWL_MVM_INVALID_STA. + * otherwise should be set to &IWL_INVALID_STA. * @cipher: pairwise cipher suite for secured measurement. * &enum iwl_location_cipher. * @hltk: HLTK to be used for secured 11az measurement @@ -1042,10 +1046,10 @@ struct iwl_tof_range_req_ap_entry_v9 { * &IWL_INITIATOR_AP_FLAGS_TB is set. * @rx_pn: the next expected PN for protected management frames Rx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. * @tx_pn: the next PN to use for protected management frames Tx. LE byte * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id - * is set to &IWL_MVM_INVALID_STA. + * is set to &IWL_INVALID_STA. 
* @r2i_ndp_params: parameters for R2I NDP ranging negotiation. * bits 0 - 2: max LTF repetitions * bits 3 - 5: max number of spatial streams diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 977ca4ac166d..26301c0b06a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -17,7 +17,7 @@ #define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) #define IWL_STATION_COUNT_MAX 16 -#define IWL_MVM_INVALID_STA 0xFF +#define IWL_INVALID_STA 0xFF enum iwl_ac { AC_BK, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c index 3cabdeb53e99..8e0c85a1240d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c @@ -537,13 +537,18 @@ bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id) /* cf. struct iwl_error_event_table */ u32 valid; __le32 err_id; - } err_info; + } err_info = {}; + int ret; if (!base) return false; - iwl_trans_read_mem_bytes(trans, base, - &err_info, sizeof(err_info)); + ret = iwl_trans_read_mem_bytes(trans, base, + &err_info, sizeof(err_info)); + + if (ret) + return true; + if (err_info.valid && err_id) *err_id = le32_to_cpu(err_info.err_id); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index e95ffe303547..c70da7281551 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1074,12 +1074,13 @@ int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs, void iwl_trans_debugfs_cleanup(struct iwl_trans *trans); #endif -#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ - do { \ - if (__builtin_constant_p(bufsize)) \ - BUILD_BUG_ON((bufsize) % sizeof(u32)); \ - iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ - } while (0) +#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ + ({ \ + if (__builtin_constant_p(bufsize)) \ + BUILD_BUG_ON((bufsize) % sizeof(u32)); \ + iwl_trans_read_mem(trans, addr, buf, \ + (bufsize) / sizeof(u32)); \ + }) int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index b607961970e9..36726ea4b822 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -689,7 +689,7 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * Rssi update while not associated - can happen since the statistics * are handled asynchronously */ - if (mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA) + if (mvmvif->deflink.ap_sta_id == IWL_INVALID_STA) return; /* No BT - reports should be disabled */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 795a166ed63a..776600ddaea6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -19,6 +19,7 @@ #define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5 #define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH 15 #define IWL_MVM_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11 +#define IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH -72 #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 89b273e02849..f85c01e04ebf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -922,7 +922,7 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) static int iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, - struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct iwl_wowlan_config_cmd_v6 *wowlan_config_cmd, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, struct ieee80211_sta *ap_sta) { @@ -948,7 +948,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret); } - iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); + if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) < 7) + iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd); if (wowlan->disconnect) wowlan_config_cmd->wakeup_filter |= @@ -1122,7 +1123,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm, static int iwl_mvm_wowlan_config(struct iwl_mvm *mvm, struct cfg80211_wowlan *wowlan, - struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct iwl_wowlan_config_cmd_v6 *wowlan_config_cmd_v6, struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif, struct iwl_mvm_vif_link_info *mvm_link, struct ieee80211_sta *ap_sta) @@ -1131,7 +1132,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - mvm->offload_tid = wowlan_config_cmd->offloading_tid; + mvm->offload_tid = wowlan_config_cmd_v6->offloading_tid; if (!unified_image) { ret = iwl_mvm_switch_to_d3(mvm); @@ -1147,9 +1148,26 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, if (ret) return ret; - ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, - sizeof(*wowlan_config_cmd), - wowlan_config_cmd); + if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_CONFIGURATION, 0) > 6) { + struct iwl_wowlan_config_cmd wowlan_config_cmd = { + .wakeup_filter = wowlan_config_cmd_v6->wakeup_filter, + .wowlan_ba_teardown_tids = + wowlan_config_cmd_v6->wowlan_ba_teardown_tids, + .is_11n_connection = + wowlan_config_cmd_v6->is_11n_connection, + .offloading_tid = wowlan_config_cmd_v6->offloading_tid, + .flags = wowlan_config_cmd_v6->flags, + .sta_id = wowlan_config_cmd_v6->sta_id, + }; + + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, + sizeof(wowlan_config_cmd), + &wowlan_config_cmd); + } else { + ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0, + sizeof(*wowlan_config_cmd_v6), + wowlan_config_cmd_v6); + } if (ret) return ret; @@ -1288,7 +1306,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, goto out_noreset; } - if (mvm_link->ap_sta_id == IWL_MVM_INVALID_STA) { + if (mvm_link->ap_sta_id == IWL_INVALID_STA) { /* if we're not associated, this must be netdetect */ if (!wowlan->nd_config) { ret = 1; @@ -1302,7 +1320,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, mvm->net_detect = true; } else { - struct iwl_wowlan_config_cmd wowlan_config_cmd = { + struct iwl_wowlan_config_cmd_v6 wowlan_config_cmd = { .offloading_tid = 0, }; @@ -1425,6 +1443,7 @@ struct iwl_wowlan_status_data { u16 non_qos_seq_ctr; u16 qos_seq_ctr[8]; u8 tid_tear_down; + u8 tid_offloaded_tx; struct { /* including RX MIC key for TKIP */ @@ -2474,7 +2493,64 @@ static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status, static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, struct iwl_wowlan_info_notif *data, struct 
iwl_wowlan_status_data *status, - u32 len, bool has_mlo_keys) + u32 len) +{ + u32 expected_len = sizeof(*data) + + data->num_mlo_link_keys * sizeof(status->mlo_keys[0]); + + if (!data) { + IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n"); + status = NULL; + return; + } + + if (len < expected_len) { + IWL_ERR(mvm, "Invalid WoWLAN info notification!\n"); + status = NULL; + return; + } + + if (mvm->fast_resume) + return; + + iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); + iwl_mvm_convert_gtk_v3(status, data->gtk); + iwl_mvm_convert_igtk(status, &data->igtk[0]); + iwl_mvm_convert_bigtk(status, data->bigtk); + status->replay_ctr = le64_to_cpu(data->replay_ctr); + status->pattern_number = le16_to_cpu(data->pattern_number); + status->tid_offloaded_tx = data->tid_offloaded_tx; + if (IWL_FW_CHECK(mvm, + data->tid_offloaded_tx >= + ARRAY_SIZE(status->qos_seq_ctr), + "tid_offloaded_tx is out of bound %d\n", + data->tid_offloaded_tx)) + data->tid_offloaded_tx = 0; + status->qos_seq_ctr[data->tid_offloaded_tx] = + le16_to_cpu(data->qos_seq_ctr); + status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); + status->num_of_gtk_rekeys = + le32_to_cpu(data->num_of_gtk_rekeys); + status->received_beacons = le32_to_cpu(data->received_beacons); + status->tid_tear_down = data->tid_tear_down; + + if (data->num_mlo_link_keys) { + status->num_mlo_keys = data->num_mlo_link_keys; + if (IWL_FW_CHECK(mvm, + status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS, + "Too many mlo keys: %d, max %d\n", + status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS)) + status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS; + memcpy(status->mlo_keys, data->mlo_gtks, + status->num_mlo_keys * sizeof(status->mlo_keys[0])); + } +} + +static void +iwl_mvm_parse_wowlan_info_notif_v4(struct iwl_mvm *mvm, + struct iwl_wowlan_info_notif_v4 *data, + struct iwl_wowlan_status_data *status, + u32 len, bool has_mlo_keys) { u32 i; u32 expected_len = sizeof(*data); @@ -2746,6 +2822,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int link_id = vif->active_links ? 
__ffs(vif->active_links) : 0; struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id]; + int wowlan_info_ver = iwl_fw_lookup_notif_ver(mvm->fw, + PROT_OFFLOAD_GROUP, + WOWLAN_INFO_NOTIFICATION, + IWL_FW_CMD_VER_UNKNOWN); if (WARN_ON(!mvm_link)) goto out_unlock; @@ -2760,11 +2840,14 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm, if (!mvm_ap_sta) goto out_unlock; - for (i = 0; i < IWL_MAX_TID_COUNT; i++) { - u16 seq = status->qos_seq_ctr[i]; - /* firmware stores last-used value, we store next value */ - seq += 0x10; - mvm_ap_sta->tid_data[i].seq_number = seq; + /* firmware stores last-used value, we store next value */ + if (wowlan_info_ver >= 5) { + mvm_ap_sta->tid_data[status->tid_offloaded_tx].seq_number = + status->qos_seq_ctr[status->tid_offloaded_tx] + 0x10; + } else { + for (i = 0; i < IWL_MAX_TID_COUNT; i++) + mvm_ap_sta->tid_data[i].seq_number = + status->qos_seq_ctr[i] + 0x10; } if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { @@ -3026,7 +3109,6 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, ieee80211_resume_disconnect(vif); } - static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -3076,7 +3158,7 @@ iwl_mvm_choose_query_wakeup_reasons(struct iwl_mvm *mvm, /* if FW uses status notification, status shouldn't be NULL here */ if (!d3_data->status) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u8 sta_id = mvm->net_detect ? IWL_MVM_INVALID_STA : + u8 sta_id = mvm->net_detect ? IWL_INVALID_STA : mvmvif->deflink.ap_sta_id; /* bug - FW with MLO has status notification */ @@ -3248,13 +3330,19 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, iwl_mvm_parse_wowlan_info_notif_v2(mvm, notif_v2, d3_data->status, len); + } else if (wowlan_info_ver < 5) { + struct iwl_wowlan_info_notif_v4 *notif = + (void *)pkt->data; + + iwl_mvm_parse_wowlan_info_notif_v4(mvm, notif, + d3_data->status, len, + wowlan_info_ver > 3); } else { struct iwl_wowlan_info_notif *notif = (void *)pkt->data; iwl_mvm_parse_wowlan_info_notif(mvm, notif, - d3_data->status, len, - wowlan_info_ver > 3); + d3_data->status, len); } d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO; @@ -3596,8 +3684,6 @@ void iwl_mvm_fast_suspend(struct iwl_mvm *mvm) IWL_ERR(mvm, "fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret); - WARN_ON(iwl_mvm_power_update_mac(mvm)); - ret = iwl_trans_d3_suspend(mvm->trans, false, false); if (ret) IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret); @@ -3619,22 +3705,31 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm) iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); if (iwl_mvm_check_rt_status(mvm, NULL)) { + IWL_ERR(mvm, + "iwl_mvm_check_rt_status failed, device is gone during suspend\n"); set_bit(STATUS_FW_ERROR, &mvm->trans->status); iwl_mvm_dump_nic_error_log(mvm); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, false, 0); - return -ENODEV; + mvm->trans->state = IWL_TRANS_NO_FW; + ret = -ENODEV; + + goto out; } ret = iwl_mvm_d3_notif_wait(mvm, &d3_data); + + if (ret) { + IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret); + mvm->trans->state = IWL_TRANS_NO_FW; + } + +out: clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; mvm->fast_resume = false; - if (ret) - IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret); - return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 25f07e00db42..fbe4e4a50852 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -221,7 +221,7 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file, mvmvif->deflink.queue_params[i].uapsd); if (vif->type == NL80211_IFTYPE_STATION && - ap_sta_id != IWL_MVM_INVALID_STA) { + ap_sta_id != IWL_INVALID_STA) { struct iwl_mvm_sta *mvm_sta; mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id); @@ -463,11 +463,13 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) +static ssize_t +iwl_dbgfs_low_latency_write_handle(struct wiphy *wiphy, struct file *file, + char *buf, size_t count, void *data) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *vif = data; u8 value; int ret; @@ -484,12 +486,28 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, return count; } -static ssize_t -iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) +static ssize_t iwl_dbgfs_low_latency_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; + char buf[10] = {}; + + return wiphy_locked_debugfs_write(mvm->hw->wiphy, file, + buf, sizeof(buf), user_buf, count, + iwl_dbgfs_low_latency_write_handle, + vif); +} + +static ssize_t +iwl_dbgfs_low_latency_force_write_handle(struct wiphy *wiphy, struct file *file, + char *buf, size_t count, void *data) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *vif = data; u8 value; int ret; @@ -517,6 +535,22 @@ iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf, return count; } +static ssize_t +iwl_dbgfs_low_latency_force_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[10] = {}; + + return wiphy_locked_debugfs_write(mvm->hw->wiphy, file, + buf, sizeof(buf), user_buf, count, + iwl_dbgfs_low_latency_force_write_handle, + vif); +} + static ssize_t iwl_dbgfs_low_latency_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -831,8 +865,20 @@ MVM_DEBUGFS_READ_FILE_OPS(mac_params); MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); -MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10); + +static const struct file_operations iwl_dbgfs_low_latency_ops = { + .write = iwl_dbgfs_low_latency_write, + .read = iwl_dbgfs_low_latency_read, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +static const struct file_operations iwl_dbgfs_low_latency_force_ops = { + .write = iwl_dbgfs_low_latency_force_write, + .open = simple_open, + .llseek = generic_file_llseek, +}; + 
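[Editor's note — illustration, not part of any patch above.] The low_latency debugfs conversion replaces the driver's generic MVM_DEBUGFS write wrappers with explicit file_operations whose .write defers to wiphy_locked_debugfs_write(), so the real handler runs with the wiphy mutex held and the write can be aborted safely if the interface goes away. A minimal sketch of the same pattern follows; struct foo_priv, the handler body and the file naming are invented for illustration only.

#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <net/cfg80211.h>

struct foo_priv {			/* hypothetical private data */
	struct wiphy *wiphy;
	bool enabled;
};

static ssize_t foo_write_handle(struct wiphy *wiphy, struct file *file,
				char *buf, size_t count, void *data)
{
	struct foo_priv *priv = data;

	/* wiphy mutex is held here; safe against interface teardown */
	if (kstrtobool(buf, &priv->enabled))
		return -EINVAL;
	return count;
}

static ssize_t foo_write(struct file *file, const char __user *user_buf,
			 size_t count, loff_t *ppos)
{
	struct foo_priv *priv = file->private_data;
	char buf[16] = {};

	/* copies the user data into buf, locks the wiphy, calls the handler */
	return wiphy_locked_debugfs_write(priv->wiphy, file, buf, sizeof(buf),
					  user_buf, count,
					  foo_write_handle, priv);
}

static const struct file_operations foo_ops = {
	.write = foo_write,
	.open = simple_open,
	.llseek = generic_file_llseek,
};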
MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 55245f913286..b26141c30c61 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -559,12 +559,12 @@ static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, #ifdef CONFIG_IWLWIFI_DEBUGFS if (mvmvif->ftm_unprotected) { - *sta_id = IWL_MVM_INVALID_STA; + *sta_id = IWL_INVALID_STA; *flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF); } #endif } else { - *sta_id = IWL_MVM_INVALID_STA; + *sta_id = IWL_INVALID_STA; } return 0; @@ -1063,6 +1063,8 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { + case 15: + /* Version 15 has the same struct as 14 */ case 14: err = iwl_mvm_ftm_start_v14(mvm, vif, req); break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index e4caa362f597..e6e468e81ab3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -131,7 +131,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (cmd_ver == 10) { + if (cmd_ver >= 10) { cmd.band = iwl_mvm_phy_band_from_nl80211(chandef->chan->band); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index f30b0fc8eca9..5ea684802ad1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1401,6 +1401,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) int ret, i; struct ieee80211_supported_band *sband = NULL; + lockdep_assert_wiphy(mvm->hw->wiphy); lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); @@ -1484,7 +1485,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) for (i = 0; i < IWL_FW_MAX_LINK_ID + 1; i++) RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL); - mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; + mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA; /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); @@ -1620,6 +1621,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) { int ret, i; + lockdep_assert_wiphy(mvm->hw->wiphy); lockdep_assert_held(&mvm->mutex); ret = iwl_trans_start_hw(mvm->trans); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index 628baf67b208..272da41567ef 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -1167,3 +1167,14 @@ void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (!mvmvif->esr_disable_reason) iwl_mvm_esr_unblocked(mvm, vif); } + +void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link) +{ + link->bcast_sta.sta_id = IWL_INVALID_STA; + link->mcast_sta.sta_id = IWL_INVALID_STA; + link->ap_sta_id = IWL_INVALID_STA; + + for (int r = 0; r < NUM_IWL_MVM_SMPS_REQ; r++) + link->smps_requests[r] = + IEEE80211_SMPS_AUTOMATIC; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 9a43df49493f..2a13d70da46c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -216,7 +216,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) .preferred_tsf = NUM_TSF_IDS, .found_vif = false, }; - int ret, i; + int ret; lockdep_assert_held(&mvm->mutex); @@ -298,9 +298,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif->time_event_data.id = TE_MAX; mvmvif->roc_activity = ROC_NUM_ACTIVITIES; - mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA; - mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; + iwl_mvm_init_link(&mvmvif->deflink); /* No need to allocate data queues to P2P Device MAC and NAN.*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) @@ -316,9 +314,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } - for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) - mvmvif->deflink.smps_requests[i] = IEEE80211_SMPS_AUTOMATIC; - return 0; exit_fail: @@ -1696,6 +1691,9 @@ iwl_mvm_handle_missed_beacons_notif(struct iwl_mvm *mvm, ieee80211_beacon_loss(vif); else ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC); + + /* try to switch links, no-op if we don't have MLO */ + iwl_mvm_int_mlo_scan(mvm, vif); } iwl_dbg_tlv_time_point(&mvm->fwrt, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a7d49b5d7993..07778d55878b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1109,7 +1109,7 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE; for_each_mvm_vif_valid_link(mvmvif, link_id) { - mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->link[link_id]->ap_sta_id = IWL_INVALID_STA; mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; mvmvif->link[link_id]->phy_ctxt = NULL; mvmvif->link[link_id]->active = 0; @@ -1237,6 +1237,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) fast_resume = mvm->fast_resume; if (fast_resume) { + iwl_mvm_mei_device_state(mvm, true); ret = iwl_mvm_fast_resume(mvm); if (ret) { iwl_mvm_stop_device(mvm); @@ -1379,10 +1380,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend) iwl_mvm_rm_aux_sta(mvm); if (suspend && - mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) { iwl_mvm_fast_suspend(mvm); - else + /* From this point on, we won't touch the device */ + iwl_mvm_mei_device_state(mvm, false); + } else { iwl_mvm_stop_device(mvm); + } iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ @@ -2951,7 +2955,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_MVM_SMPS_REQ_PROT, IEEE80211_SMPS_DYNAMIC, 0); } - } else if (mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + } else if (mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) { iwl_mvm_mei_host_disassociated(mvm); /* * If update fails - SF might be running in associated @@ -2987,7 +2991,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, IWL_ERR(mvm, "failed to remove AP station\n"); - mvmvif->deflink.ap_sta_id = IWL_MVM_INVALID_STA; + mvmvif->deflink.ap_sta_id = IWL_INVALID_STA; } /* remove quota for this interface */ @@ -3444,7 +3448,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, */ break; case STA_NOTIFY_AWAKE: - if 
(WARN_ON(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON(mvmsta->deflink.sta_id == IWL_INVALID_STA)) break; if (txqs) @@ -3524,6 +3528,8 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); unsigned int link_id; + lockdep_assert_wiphy(mvm->hw->wiphy); + /* * This is called before mac80211 does RCU synchronisation, * so here we already invalidate our internal RCU-protected diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c index 455f5f417506..ef0be44207e1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c @@ -396,7 +396,7 @@ void iwl_mvm_sec_key_remove_ap(struct iwl_mvm *mvm, u8 sec_key_ver = iwl_fw_lookup_cmd_ver(mvm->fw, sec_key_id, 0); if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION || - link->ap_sta_id == IWL_MVM_INVALID_STA)) + link->ap_sta_id == IWL_INVALID_STA)) return; if (!sec_key_ver) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 5cacfbf589dc..b807046144c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -824,7 +824,7 @@ static bool iwl_mvm_mld_vif_have_valid_ap_sta(struct iwl_mvm_vif *mvmvif) int i; for_each_mvm_vif_valid_link(mvmvif, i) { - if (mvmvif->link[i]->ap_sta_id != IWL_MVM_INVALID_STA) + if (mvmvif->link[i]->ap_sta_id != IWL_INVALID_STA) return true; } @@ -851,7 +851,7 @@ static void iwl_mvm_mld_vif_delete_all_stas(struct iwl_mvm *mvm, if (ret) IWL_ERR(mvm, "failed to remove AP station\n"); - link->ap_sta_id = IWL_MVM_INVALID_STA; + link->ap_sta_id = IWL_INVALID_STA; } } @@ -1169,8 +1169,6 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, int err, i; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { - int r; - if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) break; @@ -1182,14 +1180,8 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw, goto free; } - new_link[i]->bcast_sta.sta_id = IWL_MVM_INVALID_STA; - new_link[i]->mcast_sta.sta_id = IWL_MVM_INVALID_STA; - new_link[i]->ap_sta_id = IWL_MVM_INVALID_STA; new_link[i]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID; - - for (r = 0; r < NUM_IWL_MVM_SMPS_REQ; r++) - new_link[i]->smps_requests[r] = - IEEE80211_SMPS_AUTOMATIC; + iwl_mvm_init_link(new_link[i]); } mutex_lock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 99eb1e1db1bb..019839604011 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -146,7 +146,7 @@ int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm, unsigned int wdg_timeout = _wdg_timeout ? 
*_wdg_timeout : mvm->trans->trans_cfg->base_params->wd_timeout; - if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA)) return -ENOSPC; if (sta->type == STATION_TYPE_AUX) @@ -346,7 +346,7 @@ static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(int_sta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(int_sta->sta_id == IWL_INVALID_STA)) return -EINVAL; if (flush) @@ -521,6 +521,9 @@ void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm, unsigned int link_id, bool is_in_fw) { + lockdep_assert_wiphy(mvm->hw->wiphy); + lockdep_assert_held(&mvm->mutex); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id], is_in_fw ? ERR_PTR(-EINVAL) : NULL); RCU_INIT_POINTER(mvm->fw_id_to_link_sta[mvm_sta_link->sta_id], NULL); @@ -559,7 +562,10 @@ static int iwl_mvm_mld_alloc_sta_link(struct iwl_mvm *mvm, u32 sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif)); - if (sta_id == IWL_MVM_INVALID_STA) + lockdep_assert_wiphy(mvm->hw->wiphy); + lockdep_assert_held(&mvm->mutex); + + if (sta_id == IWL_INVALID_STA) return -ENOSPC; if (rcu_access_pointer(sta->link[link_id]) == &sta->deflink) { @@ -612,10 +618,10 @@ static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta, struct iwl_mvm_link_sta *sta_link) { if (!sta->tdls) { - WARN_ON(vif_link->ap_sta_id != IWL_MVM_INVALID_STA); + WARN_ON(vif_link->ap_sta_id != IWL_INVALID_STA); vif_link->ap_sta_id = sta_link->sta_id; } else { - WARN_ON(vif_link->ap_sta_id == IWL_MVM_INVALID_STA); + WARN_ON(vif_link->ap_sta_id == IWL_INVALID_STA); } } @@ -631,6 +637,9 @@ static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm, int ret = -EINVAL; int sta_id; + lockdep_assert_wiphy(mvm->hw->wiphy); + lockdep_assert_held(&mvm->mutex); + /* First add an empty station since allocating a queue requires * a valid station. Since we need a link_id to allocate a station, * pick up the first valid one. 
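[Editor's note — illustration, not part of any patch above.] Several mld-sta helpers gain lockdep_assert_wiphy() next to the existing lockdep_assert_held(&mvm->mutex); this matches the comment added to mvm.h further below, which states that fw_id_to_link_sta must be protected by both the wiphy and mvm mutexes. A hedged sketch of that convention, with invented names, shows how dual-lock assertions document an accessor:

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <net/mac80211.h>

struct bar_drv {			/* hypothetical driver state */
	struct wiphy *wiphy;
	struct mutex mutex;
	struct ieee80211_link_sta __rcu *id_to_link_sta[16];
};

static void bar_clear_link_sta(struct bar_drv *drv, u8 sta_id)
{
	lockdep_assert_wiphy(drv->wiphy);	/* wiphy mutex held */
	lockdep_assert_held(&drv->mutex);	/* driver mutex held */

	/* both writer locks are held, so a plain pointer clear is enough */
	RCU_INIT_POINTER(drv->id_to_link_sta[sta_id], NULL);
}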
@@ -686,7 +695,7 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, spin_lock_init(&mvm_sta->lock); - ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA, + ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_INVALID_STA, STATION_TYPE_PEER); } else { ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta); @@ -858,9 +867,10 @@ int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id) { int ret; + lockdep_assert_wiphy(mvm->hw->wiphy); lockdep_assert_held(&mvm->mutex); - if (WARN_ON(sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON(sta_id == IWL_INVALID_STA)) return 0; ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id); @@ -1064,6 +1074,7 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, unsigned int link_id; int ret; + lockdep_assert_wiphy(mvm->hw->wiphy); lockdep_assert_held(&mvm->mutex); for_each_set_bit(link_id, &old_links_long, @@ -1109,7 +1120,7 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm, goto err; if (vif->type == NL80211_IFTYPE_STATION) - mvm_vif_link->ap_sta_id = IWL_MVM_INVALID_STA; + mvm_vif_link->ap_sta_id = IWL_INVALID_STA; iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id, false); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 5aa48c77b054..2ad615293c75 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1082,6 +1082,7 @@ struct iwl_mvm { /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_STATION_COUNT_MAX]; + /* note: fw_id_to_link_sta must be protected by wiphy and mvm mutexes */ struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX]; u8 rx_ba_sessions; @@ -2104,6 +2105,7 @@ int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); u32 iwl_mvm_get_lmac_id(struct iwl_mvm *mvm, enum nl80211_band band); /* Links */ +void iwl_mvm_init_link(struct iwl_mvm_vif_link_info *link); int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf); int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -2331,7 +2333,7 @@ static inline int iwl_mvm_fast_resume(struct iwl_mvm *mvm) } #endif void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, - struct iwl_wowlan_config_cmd *cmd); + struct iwl_wowlan_config_cmd_v6 *cmd); int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool disable_offloading, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c index 1eb21fe861e5..15d4369678a2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c @@ -10,7 +10,7 @@ #include "mvm.h" void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, - struct iwl_wowlan_config_cmd *cmd) + struct iwl_wowlan_config_cmd_v6 *cmd) { int i; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4dd4a9d5c71f..e25d7570ffab 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1505,8 +1505,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->scan_cmd_size = scan_size; /* invalidate ids to prevent accidental removal of sta_id 0 */ - mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; - mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; + mvm->aux_sta.sta_id = IWL_INVALID_STA; + 
mvm->snif_sta.sta_id = IWL_INVALID_STA; /* Set EBS as successful as long as not stated otherwise by the FW. */ mvm->last_ebs_successful = true; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 1a0b5f8d4339..9e72db9bab40 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -560,7 +560,8 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig, struct iwl_mvm_vif_link_info *link_info, struct ieee80211_bss_conf *bss_conf) { - struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(vif)->mvm; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; int thold = bss_conf->cqm_rssi_thold; int hyst = bss_conf->cqm_rssi_hyst; int last_event; @@ -625,6 +626,13 @@ static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig, if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) return; + /* We're not in EMLSR and our signal is bad, try to switch link maybe */ + if (sig < IWL_MVM_LOW_RSSI_MLO_SCAN_THRESH && !mvmvif->esr_active) { + iwl_mvm_int_mlo_scan(mvm, vif); + return; + } + + /* We are in EMLSR, check if we need to exit */ exit_esr_thresh = iwl_mvm_get_esr_rssi_thresh(mvm, &bss_conf->chanreq.oper, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index ddcbd80a49fb..376b9b12fa62 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -3597,7 +3597,8 @@ static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm, IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n", n_channels); - if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) || + hweight16(vif->valid_links) == 1) return -EINVAL; size = struct_size(req, channels, n_channels); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 9d05c344d967..cd74c181c260 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -47,7 +47,7 @@ int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) lockdep_is_held(&mvm->mutex))) return sta_id; } - return IWL_MVM_INVALID_STA; + return IWL_INVALID_STA; } /* Calculate the ampdu density and max size */ @@ -1216,7 +1216,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, * can be unshared and finding one (and only one) that can be * reused. * This function is also invoked as a sort of clean-up task, - * in which case @alloc_for_sta is IWL_MVM_INVALID_STA. + * in which case @alloc_for_sta is IWL_INVALID_STA. * * Returns the queue number, or -ENOSPC. 
*/ @@ -1309,7 +1309,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) rcu_read_unlock(); - if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { + if (free_queue >= 0 && alloc_for_sta != IWL_INVALID_STA) { ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); if (ret) @@ -1522,7 +1522,7 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) mutex_lock(&mvm->mutex); - iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); + iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA); while (!list_empty(&mvm->add_stream_txqs)) { struct iwl_mvm_txq *mvmtxq; @@ -1580,7 +1580,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, return 0; /* run the general cleanup/unsharing of queues */ - iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); + iwl_mvm_inactivity_check(mvm, IWL_INVALID_STA); /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && @@ -1756,7 +1756,7 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * this function */ if (!mvm->mld_api_is_used) { - if (WARN_ON(sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON(sta_id == IWL_INVALID_STA)) return -EINVAL; mvm_sta->deflink.sta_id = sta_id; @@ -1865,7 +1865,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, else sta_id = mvm_sta->deflink.sta_id; - if (sta_id == IWL_MVM_INVALID_STA) + if (sta_id == IWL_INVALID_STA) return -ENOSPC; spin_lock_init(&mvm_sta->lock); @@ -1903,10 +1903,10 @@ update_fw: if (vif->type == NL80211_IFTYPE_STATION) { if (!sta->tdls) { - WARN_ON(mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA); + WARN_ON(mvmvif->deflink.ap_sta_id != IWL_INVALID_STA); mvmvif->deflink.ap_sta_id = sta_id; } else { - WARN_ON(mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA); + WARN_ON(mvmvif->deflink.ap_sta_id == IWL_INVALID_STA); } } @@ -2095,7 +2095,7 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0); /* unassoc - go ahead - remove the AP STA now */ - mvm_link->ap_sta_id = IWL_MVM_INVALID_STA; + mvm_link->ap_sta_id = IWL_INVALID_STA; } /* @@ -2103,7 +2103,7 @@ bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * before the STA is removed. 
*/ if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { - mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; + mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA; cancel_delayed_work(&mvm->tdls_cs.dwork); } @@ -2170,9 +2170,9 @@ int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, u8 type) { if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || - sta->sta_id == IWL_MVM_INVALID_STA) { + sta->sta_id == IWL_INVALID_STA) { sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); - if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(sta->sta_id == IWL_INVALID_STA)) return -ENOSPC; } @@ -2188,7 +2188,7 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) { RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); - sta->sta_id = IWL_MVM_INVALID_STA; + sta->sta_id = IWL_INVALID_STA; } static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, @@ -2306,7 +2306,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_INVALID_STA)) return -EINVAL; iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id, @@ -2324,7 +2324,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); - if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_INVALID_STA)) return -EINVAL; iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id, @@ -2389,7 +2389,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) if (vif->type == NL80211_IFTYPE_ADHOC) baddr = vif->bss_conf.bssid; - if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(bsta->sta_id == IWL_INVALID_STA)) return -ENOSPC; ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, @@ -2644,7 +2644,7 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, u32 status; /* This is a valid situation for GTK removal */ - if (sta_id == IWL_MVM_INVALID_STA) + if (sta_id == IWL_INVALID_STA) return 0; key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & @@ -3514,7 +3514,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, * station ID, then use AP's station ID. */ if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) { u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], @@ -3569,7 +3569,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY, new_api ? 
2 : 1); - if (sta_id == IWL_MVM_INVALID_STA) + if (sta_id == IWL_INVALID_STA) return -EINVAL; keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & @@ -3728,7 +3728,7 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, if (remove_key) { /* This is a valid situation for IGTK */ - if (sta_id == IWL_MVM_INVALID_STA) + if (sta_id == IWL_INVALID_STA) return 0; igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); @@ -3795,7 +3795,7 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, return sta->addr; if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { + mvmvif->deflink.ap_sta_id != IWL_INVALID_STA) { u8 sta_id = mvmvif->deflink.ap_sta_id; sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); @@ -3865,7 +3865,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; - u8 sta_id = IWL_MVM_INVALID_STA; + u8 sta_id = IWL_INVALID_STA; int ret; static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; @@ -3966,7 +3966,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, { bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); struct iwl_mvm_sta *mvm_sta; - u8 sta_id = IWL_MVM_INVALID_STA; + u8 sta_id = IWL_INVALID_STA; int ret, i; lockdep_assert_held(&mvm->mutex); @@ -4273,7 +4273,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, return; /* Need to block/unblock also multicast station */ - if (mvmvif->deflink.mcast_sta.sta_id != IWL_MVM_INVALID_STA) + if (mvmvif->deflink.mcast_sta.sta_id != IWL_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->deflink.mcast_sta, disable); @@ -4282,7 +4282,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, * Only unblock the broadcast station (FW blocks it for immediate * quiet, not the driver) */ - if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_MVM_INVALID_STA) + if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_INVALID_STA) iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, &mvmvif->deflink.bcast_sta, disable); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 3d25ff5cd7e8..65927ebbabb7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -196,7 +196,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm, mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm); if (state == IWL_MVM_TDLS_SW_IDLE) - mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA; + mvm->tdls_cs.cur_sta_id = IWL_INVALID_STA; } void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) @@ -250,7 +250,7 @@ iwl_mvm_tdls_check_action(struct iwl_mvm *mvm, /* get the existing peer if it's there */ if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE && - mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { + mvm->tdls_cs.cur_sta_id != IWL_INVALID_STA) { struct ieee80211_sta *sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id], lockdep_is_held(&mvm->mutex)); @@ -465,7 +465,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); /* station might be gone, in that case do nothing */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) + if (mvm->tdls_cs.peer.sta_id == IWL_INVALID_STA) return; sta = rcu_dereference_protected( @@ -512,7 +512,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, sta->addr, 
chandef->chan->center_freq, chandef->width); /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) { + if (mvm->tdls_cs.peer.sta_id != IWL_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "Existing peer. Can't start switch with %pM\n", sta->addr); @@ -566,7 +566,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr); /* we only support a single peer for channel switching */ - if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) { + if (mvm->tdls_cs.peer.sta_id == IWL_INVALID_STA) { IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr); goto out; } @@ -587,7 +587,7 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE) wait_for_phy = true; - mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; + mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA; dev_kfree_skb(mvm->tdls_cs.peer.skb); mvm->tdls_cs.peer.skb = NULL; @@ -630,7 +630,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE && params->status != 0 && mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT && - mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) { + mvm->tdls_cs.cur_sta_id != IWL_INVALID_STA) { struct ieee80211_sta *cur_sta; /* make sure it's the same peer */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index ca026b5256ce..c9867d26361b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1213,7 +1213,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; - if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA)) return -1; if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he) @@ -1357,7 +1357,7 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, mvmsta = iwl_mvm_sta_from_mac80211(sta); - if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA)) + if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_INVALID_STA)) return -1; memcpy(&info, skb->cb, sizeof(info)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 1d1364d03f02..dd890dcd1505 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -261,7 +261,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq) .data = { lq, }, }; - if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA || + if (WARN_ON(lq->sta_id == IWL_INVALID_STA || iwl_mvm_has_tlc_offload(mvm))) return -EINVAL; @@ -679,10 +679,8 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm) mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bss_iface_iterator, &bss_iter_data); - if (bss_iter_data.error) { - IWL_ERR(mvm, "More than one managed interface active!\n"); + if (bss_iter_data.error) return ERR_PTR(-EINVAL); - } return bss_iter_data.vif; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 3b9943eb6934..d19b3bd0866b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1643,6 +1643,8 @@ int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, out: if (*status == IWL_D3_STATUS_ALIVE) ret = 
iwl_pcie_d3_handshake(trans, false); + else + trans->state = IWL_TRANS_NO_FW; return ret; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c index ec02148a7f1f..08590aa68356 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c @@ -71,7 +71,7 @@ MODULE_FIRMWARE(MT7628_FIRMWARE_E2); struct platform_driver mt76_wmac_driver = { .probe = mt76_wmac_probe, - .remove_new = mt76_wmac_remove, + .remove = mt76_wmac_remove, .driver = { .name = "mt76_wmac", .of_match_table = of_wmac_match, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c index 12e3e4a91d27..06a0f2a141e8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c @@ -63,7 +63,7 @@ struct platform_driver mt7622_wmac_driver = { .of_match_table = mt7622_wmac_of_match, }, .probe = mt7622_wmac_probe, - .remove_new = mt7622_wmac_remove, + .remove = mt7622_wmac_remove, }; MODULE_FIRMWARE(MT7622_FIRMWARE_N9); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c index 90a6f61d1089..c823a7554a3a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c @@ -1303,7 +1303,7 @@ struct platform_driver mt798x_wmac_driver = { .of_match_table = mt798x_wmac_of_match, }, .probe = mt798x_wmac_probe, - .remove_new = mt798x_wmac_remove, + .remove = mt798x_wmac_remove, }; MODULE_FIRMWARE(MT7986_FIRMWARE_WA); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index 039bbedb41c2..379193b24428 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -3409,17 +3409,6 @@ void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) } } -void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], *****************Coex DM Reset****************\n"); - - halbtc8723b1ant_init_hw_config(btcoexist, false, false); - halbtc8723b1ant_init_coex_dm(btcoexist); -} - void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv = btcoexist->adapter; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h index 9d41e11388ad..a4506d838dc7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h @@ -197,7 +197,6 @@ void ex_btc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type); void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist); void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnpstate); -void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist); void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist); void ex_btc8723b1ant_display_coex_info(struct btc_coexist *btcoexist, struct seq_file *m); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index be4c0e60d44d..478cca33e5e3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -1708,19 +1708,6 @@ void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, halbtc_normal_low_power(btcoexist); } -void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type) -{ - if (!halbtc_is_bt_coexist_available(btcoexist)) - return; - - if (IS_HARDWARE_TYPE_8821(btcoexist->adapter)) { - } else if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) { - if (btcoexist->board_info.btdm_ant_num == 1) - ex_btc8723b1ant_rf_status_notify(btcoexist, type); - } else if (IS_HARDWARE_TYPE_8192E(btcoexist->adapter)) { - } -} - void exhalbtc_halt_notify(struct btc_coexist *btcoexist) { if (!halbtc_is_bt_coexist_available(btcoexist)) @@ -1768,31 +1755,6 @@ void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state) } } -void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist) -{ - struct rtl_priv *rtlpriv = btcoexist->adapter; - - if (!halbtc_is_bt_coexist_available(btcoexist)) - return; - btcoexist->statistics.cnt_coex_dm_switch++; - - halbtc_leave_low_power(btcoexist); - - if (IS_HARDWARE_TYPE_8723B(btcoexist->adapter)) { - if (btcoexist->board_info.btdm_ant_num == 1) { - btcoexist->stop_coex_dm = true; - ex_btc8723b1ant_coex_dm_reset(btcoexist); - exhalbtc_set_ant_num(rtlpriv, - BT_COEX_ANT_TYPE_DETECTED, 2); - ex_btc8723b2ant_init_hwconfig(btcoexist); - ex_btc8723b2ant_init_coex_dm(btcoexist); - btcoexist->stop_coex_dm = false; - } - } - - halbtc_normal_low_power(btcoexist); -} - void exhalbtc_periodical(struct btc_coexist *btcoexist) { if (!halbtc_is_bt_coexist_available(btcoexist)) @@ -1820,29 +1782,6 @@ void exhalbtc_periodical(struct btc_coexist *btcoexist) halbtc_normal_low_power(btcoexist); } -void exhalbtc_dbg_control(struct btc_coexist *btcoexist, - u8 code, u8 len, u8 *data) -{ - if (!halbtc_is_bt_coexist_available(btcoexist)) - return; - btcoexist->statistics.cnt_dbg_ctrl++; - - halbtc_leave_low_power(btcoexist); - - halbtc_normal_low_power(btcoexist); -} - -void exhalbtc_antenna_detection(struct btc_coexist *btcoexist, u32 cent_freq, - u32 offset, u32 span, u32 seconds) -{ - if (!halbtc_is_bt_coexist_available(btcoexist)) - return; -} - -void exhalbtc_stack_update_profile_info(void) -{ -} - void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi) { if (!halbtc_is_bt_coexist_available(btcoexist)) @@ -1851,24 +1790,6 @@ void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi) btcoexist->stack_info.min_bt_rssi = bt_rssi; } -void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version) -{ - if (!halbtc_is_bt_coexist_available(btcoexist)) - return; - - btcoexist->stack_info.hci_version = hci_version; -} - -void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist, - u16 bt_hci_version, u16 bt_patch_version) -{ - if (!halbtc_is_bt_coexist_available(btcoexist)) - return; - - btcoexist->bt_info.bt_real_fw_ver = bt_patch_version; - btcoexist->bt_info.bt_hci_ver = bt_hci_version; -} - void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type) { switch (chip_type) { diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index a96a995dd850..d8d88a989806 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -763,19 +763,9 @@ void exhalbtc_mediastatus_notify(struct btc_coexist *btcoexist, void exhalbtc_special_packet_notify(struct btc_coexist *btcoexist, u8 
pkt_type); void exhalbtc_bt_info_notify(struct btc_coexist *btcoexist, u8 *tmp_buf, u8 length); -void exhalbtc_rf_status_notify(struct btc_coexist *btcoexist, u8 type); void exhalbtc_halt_notify(struct btc_coexist *btcoexist); void exhalbtc_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state); -void exhalbtc_coex_dm_switch(struct btc_coexist *btcoexist); void exhalbtc_periodical(struct btc_coexist *btcoexist); -void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len, - u8 *data); -void exhalbtc_antenna_detection(struct btc_coexist *btcoexist, u32 cent_freq, - u32 offset, u32 span, u32 seconds); -void exhalbtc_stack_update_profile_info(void); -void exhalbtc_set_hci_version(struct btc_coexist *btcoexist, u16 hci_version); -void exhalbtc_set_bt_patch_version(struct btc_coexist *btcoexist, - u16 bt_hci_version, u16 bt_patch_version); void exhalbtc_update_min_bt_rssi(struct btc_coexist *btcoexist, s8 bt_rssi); void exhalbtc_set_bt_exist(struct btc_coexist *btcoexist, bool bt_exist); void exhalbtc_set_chip_type(struct btc_coexist *btcoexist, u8 chip_type); diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index 82cf5fb5175f..6518e77b89f5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -162,10 +162,19 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value) void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 max_attempts = 10000; u32 value32; u8 readbyte; u16 retry; + /* + * In case of USB devices, transfer speeds are limited, hence + * efuse I/O reads could be (way) slower. So, decrease (a lot) + * the read attempts in case of failures. 
+ */ + if (rtlpriv->rtlhal.interface == INTF_USB) + max_attempts = 10; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (_offset & 0xff)); readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2); @@ -178,7 +187,7 @@ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf) retry = 0; value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]); - while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) { + while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < max_attempts)) { value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]); retry++; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 1b144fbd4d26..048744166a92 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -350,7 +350,8 @@ MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless"); -MODULE_FIRMWARE("rtlwifi/rtl8723efw.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8723fw.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8723fw_B.bin"); module_param_named(swenc, rtl8723e_mod_params.sw_crypto, bool, 0444); module_param_named(debug_level, rtl8723e_mod_params.debug_level, int, 0644); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index a65503c5ae5a..b5266e560416 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -407,6 +407,9 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless"); MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin"); MODULE_FIRMWARE("rtlwifi/rtl8821aefw_29.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8821aefw_wowlan.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8812aefw.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8812aefw_wowlan.bin"); module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444); module_param_named(debug_level, rtl8821ae_mod_params.debug_level, int, 0644); diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig index 02b0d698413b..733b3e58da51 100644 --- a/drivers/net/wireless/realtek/rtw88/Kconfig +++ b/drivers/net/wireless/realtek/rtw88/Kconfig @@ -43,6 +43,17 @@ config RTW88_8723D config RTW88_8821C tristate +config RTW88_88XXA + tristate + +config RTW88_8821A + tristate + select RTW88_88XXA + +config RTW88_8812A + tristate + select RTW88_88XXA + config RTW88_8822BE tristate "Realtek 8822BE PCI wireless network adapter" depends on PCI @@ -189,6 +200,28 @@ config RTW88_8821CU 802.11ac USB wireless network adapter +config RTW88_8821AU + tristate "Realtek 8821AU/8811AU USB wireless network adapter" + depends on USB + select RTW88_CORE + select RTW88_USB + select RTW88_8821A + help + Select this option will enable support for 8821AU and 8811AU chipset + + 802.11ac USB wireless network adapter + +config RTW88_8812AU + tristate "Realtek 8812AU USB wireless network adapter" + depends on USB + select RTW88_CORE + select RTW88_USB + select RTW88_8812A + help + Select this option will enable support for 8812AU chipset + + 802.11ac USB wireless network adapter + config RTW88_DEBUG bool "Realtek rtw88 debug support" depends on RTW88_CORE diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile index 8f47359b4380..f0b49f5a8a5a 100644 --- 
a/drivers/net/wireless/realtek/rtw88/Makefile +++ b/drivers/net/wireless/realtek/rtw88/Makefile @@ -77,6 +77,21 @@ rtw88_8821cs-objs := rtw8821cs.o obj-$(CONFIG_RTW88_8821CU) += rtw88_8821cu.o rtw88_8821cu-objs := rtw8821cu.o +obj-$(CONFIG_RTW88_88XXA) += rtw88_88xxa.o +rtw88_88xxa-objs := rtw88xxa.o + +obj-$(CONFIG_RTW88_8821A) += rtw88_8821a.o +rtw88_8821a-objs := rtw8821a.o rtw8821a_table.o + +obj-$(CONFIG_RTW88_8812A) += rtw88_8812a.o +rtw88_8812a-objs := rtw8812a.o rtw8812a_table.o + +obj-$(CONFIG_RTW88_8821AU) += rtw88_8821au.o +rtw88_8821au-objs := rtw8821au.o + +obj-$(CONFIG_RTW88_8812AU) += rtw88_8812au.o +rtw88_8812au-objs := rtw8812au.o + obj-$(CONFIG_RTW88_PCI) += rtw88_pci.o rtw88_pci-objs := pci.o diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index a99776af56c2..c929db1e53ca 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -446,7 +446,7 @@ static void rtw_coex_check_rfk(struct rtw_dev *rtwdev) } } -static void rtw_coex_query_bt_info(struct rtw_dev *rtwdev) +void rtw_coex_query_bt_info(struct rtw_dev *rtwdev) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_stat *coex_stat = &coex->stat; @@ -494,11 +494,29 @@ static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev) struct rtw_coex_stat *coex_stat = &coex->stat; struct rtw_coex_dm *coex_dm = &coex->dm; bool bt_disabled = false; + bool bt_active = true; u16 score_board; if (chip->scbd_support) { score_board = rtw_coex_read_scbd(rtwdev); bt_disabled = !(score_board & COEX_SCBD_ONOFF); + } else { + if (coex_stat->hi_pri_tx == 0 && coex_stat->hi_pri_rx == 0 && + coex_stat->lo_pri_tx == 0 && coex_stat->lo_pri_rx == 0) + bt_active = false; + + if (coex_stat->hi_pri_tx == 0xffff && coex_stat->hi_pri_rx == 0xffff && + coex_stat->lo_pri_tx == 0xffff && coex_stat->lo_pri_rx == 0xffff) + bt_active = false; + + if (bt_active) { + coex_stat->bt_disable_cnt = 0; + bt_disabled = false; + } else { + coex_stat->bt_disable_cnt++; + if (coex_stat->bt_disable_cnt >= 10) + bt_disabled = true; + } } if (coex_stat->bt_disabled != bt_disabled) { @@ -950,12 +968,18 @@ static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control) static void rtw_coex_set_gnt_bt(struct rtw_dev *rtwdev, u8 state) { + if (!rtwdev->chip->ltecoex_addr) + return; + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0xc000, state); rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0c00, state); } static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state) { + if (!rtwdev->chip->ltecoex_addr) + return; + rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x3000, state); rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, 0x0300, state); } @@ -2747,16 +2771,19 @@ void rtw_coex_power_on_setting(struct rtw_dev *rtwdev) rtw_write8(rtwdev, 0xff1a, 0x0); rtw_coex_set_gnt_debug(rtwdev); } +EXPORT_SYMBOL(rtw_coex_power_on_setting); void rtw_coex_power_off_setting(struct rtw_dev *rtwdev) { rtw_write16(rtwdev, REG_WIFI_BT_INFO, BIT_BT_INT_EN); } +EXPORT_SYMBOL(rtw_coex_power_off_setting); void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only) { __rtw_coex_init_hw_config(rtwdev, wifi_only); } +EXPORT_SYMBOL(rtw_coex_init_hw_config); void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type) { @@ -3904,7 +3931,7 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) u8 sys_lte; u16 score_board_WB, score_board_BW; u32 wl_reg_6c0, wl_reg_6c4, wl_reg_6c8, wl_reg_778, wl_reg_6cc; - u32 lte_coex, bt_coex; + u32 
lte_coex = 0, bt_coex = 0; int i; score_board_BW = rtw_coex_read_scbd(rtwdev); @@ -3916,8 +3943,10 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m) wl_reg_778 = rtw_read8(rtwdev, REG_BT_STAT_CTRL); sys_lte = rtw_read8(rtwdev, 0x73); - lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38); - bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54); + if (rtwdev->chip->ltecoex_addr) { + lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38); + bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54); + } if (!coex_stat->wl_under_ips && (!coex_stat->wl_under_lps || coex_stat->wl_force_lps_ctrl) && diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h index 57cf29da9ea4..c398be8391f7 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.h +++ b/drivers/net/wireless/realtek/rtw88/coex.h @@ -384,6 +384,7 @@ u32 rtw_coex_read_indirect_reg(struct rtw_dev *rtwdev, u16 addr); void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr, u32 mask, u32 val); void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set); +void rtw_coex_query_bt_info(struct rtw_dev *rtwdev); void rtw_coex_bt_relink_work(struct work_struct *work); void rtw_coex_bt_reenable_work(struct work_struct *work); @@ -419,4 +420,14 @@ static inline bool rtw_coex_disabled(struct rtw_dev *rtwdev) return coex_stat->bt_disabled; } +static inline void rtw_coex_active_query_bt_info(struct rtw_dev *rtwdev) +{ + /* The RTL8821AU firmware doesn't send C2H_BT_INFO by itself + * when bluetooth headphones are disconnected, so we have to + * ask for it regularly. + */ + if (rtwdev->chip->id == RTW_CHIP_TYPE_8821A && rtwdev->efuse.btcoex) + rtw_coex_query_bt_info(rtwdev); +} + #endif diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index c26a6905fd15..364ec0436d0f 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -308,7 +308,7 @@ static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; - u8 page_size = rtwdev->chip->page_size; + u16 page_size = rtwdev->chip->page_size; u32 buf_size = debugfs_priv->rsvd_page.page_num * page_size; u32 offset = debugfs_priv->rsvd_page.page_offset * page_size; u8 *buf; diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 813c12148819..e6e9946fbf44 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -139,25 +139,30 @@ static u16 get_max_amsdu_len(u32 bit_rate) struct rtw_fw_iter_ra_data { struct rtw_dev *rtwdev; u8 *payload; + u8 length; }; static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta) { struct rtw_fw_iter_ra_data *ra_data = data; + struct rtw_c2h_ra_rpt *ra_rpt = (struct rtw_c2h_ra_rpt *)ra_data->payload; struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv; u8 mac_id, rate, sgi, bw; u8 mcs, nss; u32 bit_rate; - mac_id = GET_RA_REPORT_MACID(ra_data->payload); + mac_id = ra_rpt->mac_id; if (si->mac_id != mac_id) return; si->ra_report.txrate.flags = 0; - rate = GET_RA_REPORT_RATE(ra_data->payload); - sgi = GET_RA_REPORT_SGI(ra_data->payload); - bw = GET_RA_REPORT_BW(ra_data->payload); + rate = u8_get_bits(ra_rpt->rate_sgi, RTW_C2H_RA_RPT_RATE); + sgi = u8_get_bits(ra_rpt->rate_sgi, RTW_C2H_RA_RPT_SGI); + if (ra_data->length >= offsetofend(typeof(*ra_rpt), bw)) + bw = ra_rpt->bw; + else + bw = 
si->bw_mode; if (rate < DESC_RATEMCS0) { si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate); @@ -197,14 +202,18 @@ legacy: static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload, u8 length) { + struct rtw_c2h_ra_rpt *ra_rpt = (struct rtw_c2h_ra_rpt *)payload; struct rtw_fw_iter_ra_data ra_data; - if (WARN(length < 7, "invalid ra report c2h length\n")) + if (WARN(length < rtwdev->chip->c2h_ra_report_size, + "invalid ra report c2h length %d\n", length)) return; - rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload); + rtwdev->dm_info.tx_rate = u8_get_bits(ra_rpt->rate_sgi, + RTW_C2H_RA_RPT_RATE); ra_data.rtwdev = rtwdev; ra_data.payload = payload; + ra_data.length = length; rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data); } @@ -1281,16 +1290,16 @@ static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type); pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); memset(pkt_desc, 0, chip->tx_pkt_desc_sz); - rtw_tx_fill_tx_desc(&pkt_info, skb); + rtw_tx_fill_tx_desc(rtwdev, &pkt_info, skb); } -static inline u8 rtw_len_to_page(unsigned int len, u8 page_size) +static inline u8 rtw_len_to_page(unsigned int len, u16 page_size) { return DIV_ROUND_UP(len, page_size); } -static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size, - u8 page_margin, u32 page, u8 *buf, +static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u16 page_size, + u16 page_margin, u32 page, u8 *buf, struct rtw_rsvd_page *rsvd_pkt) { struct sk_buff *skb = rsvd_pkt->skb; @@ -1592,13 +1601,13 @@ static int __rtw_build_rsvd_page_from_vifs(struct rtw_dev *rtwdev) static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size) { - struct ieee80211_hw *hw = rtwdev->hw; const struct rtw_chip_info *chip = rtwdev->chip; - struct sk_buff *iter; + struct ieee80211_hw *hw = rtwdev->hw; struct rtw_rsvd_page *rsvd_pkt; - u32 page = 0; + struct sk_buff *iter; + u16 page_size, page_margin, tx_desc_sz; u8 total_page = 0; - u8 page_size, page_margin, tx_desc_sz; + u32 page = 0; u8 *buf; int ret; @@ -2004,12 +2013,13 @@ static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes, { const struct rtw_chip_info *chip = rtwdev->chip; struct sk_buff *skb, *tmp; - u8 page_offset = 1, *buf, page_size = chip->page_size; u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc; - u16 buf_offset = page_size * page_offset; u8 tx_desc_sz = chip->tx_pkt_desc_sz; - u8 page_cnt, pages; + u16 page_size = chip->page_size; + u8 page_offset = 1, *buf; + u16 buf_offset = page_size * page_offset; unsigned int pkt_len; + u8 page_cnt, pages; int ret; if (rtw_fw_feature_ext_check(&rtwdev->fw, FW_FEATURE_EXT_OLD_PAGE_NUM)) diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index e999c24e4634..404de1b0c407 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -85,6 +85,19 @@ struct rtw_c2h_adaptivity { u8 option; } __packed; +struct rtw_c2h_ra_rpt { + u8 rate_sgi; + u8 mac_id; + u8 byte2; + u8 status; + u8 byte4; + u8 ra_ratio; + u8 bw; +} __packed; + +#define RTW_C2H_RA_RPT_RATE GENMASK(6, 0) +#define RTW_C2H_RA_RPT_SGI BIT(7) + struct rtw_h2c_register { u32 w0; u32 w1; @@ -364,10 +377,6 @@ struct rtw_fw_hdr_legacy { #define GET_CHAN_SWITCH_CENTRAL_CH(c2h_payload) (c2h_payload[2]) #define GET_CHAN_SWITCH_ID(c2h_payload) (c2h_payload[3]) #define GET_CHAN_SWITCH_STATUS(c2h_payload) (c2h_payload[4]) -#define 
GET_RA_REPORT_RATE(c2h_payload) (c2h_payload[0] & 0x7f) -#define GET_RA_REPORT_SGI(c2h_payload) ((c2h_payload[0] & 0x80) >> 7) -#define GET_RA_REPORT_BW(c2h_payload) (c2h_payload[6]) -#define GET_RA_REPORT_MACID(c2h_payload) (c2h_payload[1]) #define GET_BCN_FILTER_NOTIFY_TYPE(c2h_payload) (c2h_payload[1] & 0xf) #define GET_BCN_FILTER_NOTIFY_EVENT(c2h_payload) (c2h_payload[1] & 0x10) diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index e5abcc20b63c..cae9cca6dca3 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -227,8 +227,8 @@ static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask, return 0; } -static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev, - const struct rtw_pwr_seq_cmd * const *cmd_seq) +int rtw_pwr_seq_parser(struct rtw_dev *rtwdev, + const struct rtw_pwr_seq_cmd * const *cmd_seq) { u8 cut_mask; u8 intf_mask; @@ -267,6 +267,7 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev, return 0; } +EXPORT_SYMBOL(rtw_pwr_seq_parser); static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on) { @@ -994,6 +995,7 @@ int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) return 0; } +EXPORT_SYMBOL(rtw_download_firmware); static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues) { @@ -1127,7 +1129,7 @@ static int txdma_queue_mapping(struct rtw_dev *rtwdev) return 0; } -static int set_trx_fifo_info(struct rtw_dev *rtwdev) +int rtw_set_trx_fifo_info(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fifo_conf *fifo = &rtwdev->fifo; @@ -1136,7 +1138,7 @@ static int set_trx_fifo_info(struct rtw_dev *rtwdev) /* config rsvd page num */ fifo->rsvd_drv_pg_num = chip->rsvd_drv_pg_num; - fifo->txff_pg_num = chip->txff_size >> 7; + fifo->txff_pg_num = chip->txff_size / chip->page_size; if (rtw_chip_wcpu_11n(rtwdev)) fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num; else @@ -1179,6 +1181,7 @@ static int set_trx_fifo_info(struct rtw_dev *rtwdev) return 0; } +EXPORT_SYMBOL(rtw_set_trx_fifo_info); static int __priority_queue_cfg(struct rtw_dev *rtwdev, const struct rtw_page_table *pg_tbl, @@ -1256,7 +1259,7 @@ static int priority_queue_cfg(struct rtw_dev *rtwdev) u16 pubq_num; int ret; - ret = set_trx_fifo_info(rtwdev); + ret = rtw_set_trx_fifo_info(rtwdev); if (ret) return ret; diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h index 58c3dccc14bb..6905e2747372 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.h +++ b/drivers/net/wireless/realtek/rtw88/mac.h @@ -30,11 +30,14 @@ void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw, u8 primary_ch_idx); +int rtw_pwr_seq_parser(struct rtw_dev *rtwdev, + const struct rtw_pwr_seq_cmd * const *cmd_seq); int rtw_mac_power_on(struct rtw_dev *rtwdev); void rtw_mac_power_off(struct rtw_dev *rtwdev); int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw); int rtw_mac_init(struct rtw_dev *rtwdev); void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop); +int rtw_set_trx_fifo_info(struct rtw_dev *rtwdev); int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size); static inline void rtw_mac_flush_all_queues(struct rtw_dev *rtwdev, bool drop) diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index bbdef38c7e34..e91530ed05a0 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ 
-202,6 +202,21 @@ static void rtw_vif_watch_dog_iter(void *data, struct ieee80211_vif *vif) rtwvif->stats.rx_cnt = 0; } +static void rtw_sw_beacon_loss_check(struct rtw_dev *rtwdev, + struct rtw_vif *rtwvif, int received_beacons) +{ + int watchdog_delay = 2000000 / 1024; /* TU */ + int beacon_int, expected_beacons; + + if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER) || !rtwvif) + return; + + beacon_int = rtwvif_to_vif(rtwvif)->bss_conf.beacon_int; + expected_beacons = DIV_ROUND_UP(watchdog_delay, beacon_int); + + rtwdev->beacon_loss = received_beacons < expected_beacons / 2; +} + /* process TX/RX statistics periodically for hardware, * the information helps hardware to enhance performance */ @@ -212,6 +227,7 @@ static void rtw_watch_dog_work(struct work_struct *work) struct rtw_traffic_stats *stats = &rtwdev->stats; struct rtw_watch_dog_iter_data data = {}; bool busy_traffic = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + int received_beacons = rtwdev->dm_info.cur_pkt_count.num_bcn_pkt; u32 tx_unicast_mbps, rx_unicast_mbps; bool ps_active; @@ -258,6 +274,7 @@ static void rtw_watch_dog_work(struct work_struct *work) rtw_leave_lps(rtwdev); rtw_coex_wl_status_check(rtwdev); rtw_coex_query_bt_hid_list(rtwdev); + rtw_coex_active_query_bt_info(rtwdev); rtw_phy_dynamic_mechanism(rtwdev); @@ -270,6 +287,8 @@ static void rtw_watch_dog_work(struct work_struct *work) */ rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data); + rtw_sw_beacon_loss_check(rtwdev, data.rtwvif, received_beacons); + /* fw supports only one station associated to enter lps, if there are * more than two stations associated to the AP, then we can not enter * lps, because fw does not handle the overlapped beacon interval @@ -1309,7 +1328,7 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, rtw_fw_send_ra_info(rtwdev, si, reset_ra_mask); } -static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) +int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw; @@ -1329,6 +1348,7 @@ static int rtw_wait_firmware_completion(struct rtw_dev *rtwdev) return ret; } +EXPORT_SYMBOL(rtw_wait_firmware_completion); static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, struct rtw_fw_state *fw) @@ -1350,7 +1370,7 @@ static enum rtw_lps_deep_mode rtw_update_lps_deep_mode(struct rtw_dev *rtwdev, return LPS_DEEP_MODE_NONE; } -static int rtw_power_on(struct rtw_dev *rtwdev) +int rtw_power_on(struct rtw_dev *rtwdev) { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_fw_state *fw = &rtwdev->fw; @@ -1413,6 +1433,7 @@ err_off: err: return ret; } +EXPORT_SYMBOL(rtw_power_on); void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start) { @@ -1485,7 +1506,7 @@ int rtw_core_start(struct rtw_dev *rtwdev) { int ret; - ret = rtw_power_on(rtwdev); + ret = rtwdev->chip->ops->power_on(rtwdev); if (ret) return ret; @@ -1505,12 +1526,13 @@ int rtw_core_start(struct rtw_dev *rtwdev) return 0; } -static void rtw_power_off(struct rtw_dev *rtwdev) +void rtw_power_off(struct rtw_dev *rtwdev) { rtw_hci_stop(rtwdev); rtw_coex_power_off_setting(rtwdev); rtw_mac_power_off(rtwdev); } +EXPORT_SYMBOL(rtw_power_off); void rtw_core_stop(struct rtw_dev *rtwdev) { @@ -1535,7 +1557,7 @@ void rtw_core_stop(struct rtw_dev *rtwdev) mutex_lock(&rtwdev->mutex); - rtw_power_off(rtwdev); + rtwdev->chip->ops->power_off(rtwdev); } static void rtw_init_ht_cap(struct rtw_dev *rtwdev, @@ -1917,6 +1939,9 @@ static int 
rtw_dump_hw_feature(struct rtw_dev *rtwdev) u8 bw; int i; + if (!rtwdev->chip->hw_feature_report) + return 0; + id = rtw_read8(rtwdev, REG_C2HEVT); if (id != C2H_HW_FEATURE_REPORT) { rtw_err(rtwdev, "failed to read hw feature report\n"); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 05cfb235f272..cd09fb6f7b8b 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -189,6 +189,8 @@ enum rtw_chip_type { RTW_CHIP_TYPE_8723D, RTW_CHIP_TYPE_8821C, RTW_CHIP_TYPE_8703B, + RTW_CHIP_TYPE_8821A, + RTW_CHIP_TYPE_8812A, }; enum rtw_tx_queue_type { @@ -841,6 +843,8 @@ struct rtw_regd { }; struct rtw_chip_ops { + int (*power_on)(struct rtw_dev *rtwdev); + void (*power_off)(struct rtw_dev *rtwdev); int (*mac_init)(struct rtw_dev *rtwdev); int (*dump_fw_crash)(struct rtw_dev *rtwdev); void (*shutdown)(struct rtw_dev *rtwdev); @@ -1095,17 +1099,20 @@ enum rtw_rfe_fem { struct rtw_rfe_def { const struct rtw_table *phy_pg_tbl; const struct rtw_table *txpwr_lmt_tbl; + const struct rtw_pwr_track_tbl *pwr_track_tbl; const struct rtw_table *agc_btg_tbl; }; -#define RTW_DEF_RFE(chip, bb_pg, pwrlmt) { \ +#define RTW_DEF_RFE(chip, bb_pg, pwrlmt, track) { \ .phy_pg_tbl = &rtw ## chip ## _bb_pg_type ## bb_pg ## _tbl, \ .txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \ + .pwr_track_tbl = &rtw ## chip ## _pwr_track_type ## track ## _tbl, \ } -#define RTW_DEF_RFE_EXT(chip, bb_pg, pwrlmt, btg) { \ +#define RTW_DEF_RFE_EXT(chip, bb_pg, pwrlmt, track, btg) { \ .phy_pg_tbl = &rtw ## chip ## _bb_pg_type ## bb_pg ## _tbl, \ .txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \ + .pwr_track_tbl = &rtw ## chip ## _pwr_track_type ## track ## _tbl, \ .agc_btg_tbl = &rtw ## chip ## _agc_btg_type ## btg ## _tbl, \ } @@ -1183,7 +1190,7 @@ struct rtw_chip_info { u32 fw_rxff_size; u16 rsvd_drv_pg_num; u8 band; - u8 page_size; + u16 page_size; u8 csi_buf_pg_num; u8 dig_max; u8 dig_min; @@ -1198,6 +1205,9 @@ struct rtw_chip_info { const struct rtw_fwcd_segs *fwcd_segs; u8 usb_tx_agg_desc_num; + bool hw_feature_report; + u8 c2h_ra_report_size; + bool old_datarate_fb_limit; u8 default_1ss_tx_path; @@ -1236,7 +1246,6 @@ struct rtw_chip_info { u16 dpd_ratemask; u8 iqk_threshold; u8 lck_threshold; - const struct rtw_pwr_track_tbl *pwr_track_tbl; u8 bfer_su_max_num; u8 bfer_mu_max_num; @@ -1485,6 +1494,7 @@ struct rtw_coex_stat { u8 bt_hid_slot; u8 bt_a2dp_bitpool; u8 bt_iqk_state; + u8 bt_disable_cnt; u16 wl_beacon_interval; u8 wl_noisy_level; @@ -1708,7 +1718,7 @@ struct rtw_dm_info { bool pwr_trk_init_trigger; struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX]; s8 txagc_remnant_cck; - s8 txagc_remnant_ofdm; + s8 txagc_remnant_ofdm[RTW_RF_PATH_MAX]; u8 rx_cck_agc_report_type; /* backup dack results for each path and I/Q */ @@ -1831,6 +1841,20 @@ struct rtw_phy_cond { #define BRANCH_ENDIF 3 }; +struct rtw_phy_cond2 { +#ifdef __LITTLE_ENDIAN + u8 type_glna; + u8 type_gpa; + u8 type_alna; + u8 type_apa; +#else + u8 type_apa; + u8 type_alna; + u8 type_gpa; + u8 type_glna; +#endif +}; + struct rtw_fifo_conf { /* tx fifo information */ u16 rsvd_boundary; @@ -1912,6 +1936,7 @@ struct rtw_hal { u8 oem_id; u8 pkg_type; struct rtw_phy_cond phy_cond; + struct rtw_phy_cond2 phy_cond2; bool rfe_btg; u8 ps_mode; @@ -1934,6 +1959,7 @@ struct rtw_hal { u32 antenna_rx; u8 bfee_sts_cap; bool txrx_1ss; + bool cck_high_power; /* protect tx power section */ struct mutex tx_power_mutex; @@ -2189,6 +2215,7 @@ void 
rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif, void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, bool hw_scan); int rtw_core_start(struct rtw_dev *rtwdev); +void rtw_power_off(struct rtw_dev *rtwdev); void rtw_core_stop(struct rtw_dev *rtwdev); int rtw_chip_info_setup(struct rtw_dev *rtwdev); int rtw_core_init(struct rtw_dev *rtwdev); @@ -2203,6 +2230,8 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta, bool fw_exist); void rtw_fw_recovery(struct rtw_dev *rtwdev); +int rtw_wait_firmware_completion(struct rtw_dev *rtwdev); +int rtw_power_on(struct rtw_dev *rtwdev); void rtw_core_fw_scan_notify(struct rtw_dev *rtwdev, bool start); int rtw_dump_fw(struct rtw_dev *rtwdev, const u32 ocp_src, u32 size, u32 fwcd_item); diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index f71e41d6f97c..0ecaefc4c83d 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -824,7 +824,7 @@ static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev, pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); memset(pkt_desc, 0, tx_pkt_desc_sz); pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue); - rtw_tx_fill_tx_desc(pkt_info, skb); + rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb); dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&rtwpci->pdev->dev, dma)) diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index d57a2aabd89b..8ed20c89d216 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -18,7 +18,10 @@ struct phy_cfg_pair { }; union phy_table_tile { - struct rtw_phy_cond cond; + struct { + struct rtw_phy_cond cond; + struct rtw_phy_cond2 cond2; + } __packed; struct phy_cfg_pair cfg; }; @@ -527,6 +530,13 @@ static void rtw_phy_dig(struct rtw_dev *rtwdev) */ rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt); + /* Mitigate beacon loss and connectivity issues, mainly (only?) + * in the 5 GHz band + */ + if (rtwdev->chip->id == RTW_CHIP_TYPE_8812A && rtwdev->beacon_loss && + linked && dm_info->total_fa_cnt < DIG_PERF_FA_TH_EXTRA_HIGH) + cur_igi = DIG_CVRG_MIN; + if (cur_igi != pre_igi) rtw_phy_dig_write(rtwdev, cur_igi); } @@ -1041,7 +1051,8 @@ void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg) { struct rtw_hal *hal = &rtwdev->hal; struct rtw_efuse *efuse = &rtwdev->efuse; - struct rtw_phy_cond cond = {0}; + struct rtw_phy_cond cond = {}; + struct rtw_phy_cond2 cond2 = {}; cond.cut = hal->cut_version ? hal->cut_version : 15; cond.pkg = pkg ? 
pkg : 15; @@ -1061,15 +1072,34 @@ void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg) break; } + if (rtwdev->chip->id == RTW_CHIP_TYPE_8812A || + rtwdev->chip->id == RTW_CHIP_TYPE_8821A) { + cond.rfe = 0; + cond.rfe |= efuse->ext_lna_2g; + cond.rfe |= efuse->ext_pa_2g << 1; + cond.rfe |= efuse->ext_lna_5g << 2; + cond.rfe |= efuse->ext_pa_5g << 3; + cond.rfe |= efuse->btcoex << 4; + + cond2.type_alna = efuse->alna_type; + cond2.type_glna = efuse->glna_type; + cond2.type_apa = efuse->apa_type; + cond2.type_gpa = efuse->gpa_type; + } + hal->phy_cond = cond; + hal->phy_cond2 = cond2; - rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond)); + rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x cond2=0x%08x\n", + *((u32 *)&hal->phy_cond), *((u32 *)&hal->phy_cond2)); } -static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond) +static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond, + struct rtw_phy_cond2 cond2) { struct rtw_hal *hal = &rtwdev->hal; struct rtw_phy_cond drv_cond = hal->phy_cond; + struct rtw_phy_cond2 drv_cond2 = hal->phy_cond2; if (cond.cut && cond.cut != drv_cond.cut) return false; @@ -1080,8 +1110,29 @@ static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond) if (cond.intf && cond.intf != drv_cond.intf) return false; - if (cond.rfe != drv_cond.rfe) - return false; + if (rtwdev->chip->id == RTW_CHIP_TYPE_8812A || + rtwdev->chip->id == RTW_CHIP_TYPE_8821A) { + if (!(cond.rfe & 0x0f)) + return true; + + if ((cond.rfe & drv_cond.rfe) != cond.rfe) + return false; + + if ((cond.rfe & BIT(0)) && cond2.type_glna != drv_cond2.type_glna) + return false; + + if ((cond.rfe & BIT(1)) && cond2.type_gpa != drv_cond2.type_gpa) + return false; + + if ((cond.rfe & BIT(2)) && cond2.type_alna != drv_cond2.type_alna) + return false; + + if ((cond.rfe & BIT(3)) && cond2.type_apa != drv_cond2.type_apa) + return false; + } else { + if (cond.rfe != drv_cond.rfe) + return false; + } return true; } @@ -1090,7 +1141,8 @@ void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl) { const union phy_table_tile *p = tbl->data; const union phy_table_tile *end = p + tbl->size / 2; - struct rtw_phy_cond pos_cond = {0}; + struct rtw_phy_cond pos_cond = {}; + struct rtw_phy_cond2 pos_cond2 = {}; bool is_matched = true, is_skipped = false; BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair)); @@ -1109,11 +1161,12 @@ void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl) case BRANCH_ELIF: default: pos_cond = p->cond; + pos_cond2 = p->cond2; break; } } else if (p->cond.neg) { if (!is_skipped) { - if (check_positive(rtwdev, pos_cond)) { + if (check_positive(rtwdev, pos_cond, pos_cond2)) { is_matched = true; is_skipped = true; } else { @@ -1470,10 +1523,8 @@ static void rtw_phy_store_tx_power_by_rate(struct rtw_dev *rtwdev, rate = rates[i]; if (band == PHY_BAND_2G) hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset; - else if (band == PHY_BAND_5G) - hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset; else - continue; + hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset; } } @@ -2125,8 +2176,8 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw, *limit = rtw_phy_get_tx_power_limit(rtwdev, band, bw, path, rate, ch, regd); - *remnant = (rate <= DESC_RATE11M ? dm_info->txagc_remnant_cck : - dm_info->txagc_remnant_ofdm); + *remnant = rate <= DESC_RATE11M ? 
dm_info->txagc_remnant_cck : + dm_info->txagc_remnant_ofdm[path]; *sar = rtw_phy_get_tx_power_sar(rtwdev, hal->sar_band, path, rate); } @@ -2340,7 +2391,8 @@ void rtw_phy_init_tx_power(struct rtw_dev *rtwdev) void rtw_phy_config_swing_table(struct rtw_dev *rtwdev, struct rtw_swing_table *swing_table) { - const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl; + const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev); + const struct rtw_pwr_track_tbl *tbl = rfe_def->pwr_track_tbl; u8 channel = rtwdev->hal.current_channel; if (IS_CH_2G_BAND(channel)) { diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 4d9b8668e8b0..e4d506cf9c33 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -9,6 +9,7 @@ #define BIT_FEN_EN_25_1 BIT(13) #define BIT_FEN_ELDR BIT(12) #define BIT_FEN_CPUEN BIT(2) +#define BIT_FEN_USBA BIT(2) #define BIT_FEN_BB_GLB_RST BIT(1) #define BIT_FEN_BB_RSTB BIT(0) #define BIT_R_DIS_PRST BIT(6) @@ -16,6 +17,10 @@ #define REG_SYS_PW_CTRL 0x0004 #define BIT_PFM_WOWL BIT(3) #define BIT_APFM_OFFMAC BIT(9) +#define REG_APS_FSMCO 0x0004 +#define APS_FSMCO_MAC_ENABLE BIT(8) +#define APS_FSMCO_MAC_OFF BIT(9) +#define APS_FSMCO_HW_POWERDOWN BIT(15) #define REG_SYS_CLK_CTRL 0x0008 #define BIT_CPU_CLK_EN BIT(14) @@ -58,6 +63,8 @@ #define BIT_SHIFT_LDO25_VOLTAGE 4 #define BIT_LDO25_EN BIT(7) +#define REG_ACLK_MON 0x3e + #define REG_GPIO_MUXCFG 0x0040 #define BIT_FSPI_EN BIT(19) #define BIT_EN_SIC BIT(12) @@ -90,6 +97,8 @@ #define BIT_USB_SUS_DIS BIT(8) #define BIT_SDIO_PAD_E5 BIT(18) +#define REG_RF_B_CTRL 0x76 + #define REG_AFE_CTRL_4 0x0078 #define BIT_CK320M_AFE_EN BIT(4) #define BIT_EN_SYN BIT(15) @@ -134,6 +143,11 @@ #define REG_PMC_DBG_CTRL1 0xa8 #define BITS_PMC_BT_IQK_STS GENMASK(22, 21) +#define REG_HIMR0 0xb0 +#define REG_HISR0 0xb4 +#define REG_HIMR1 0xb8 +#define REG_HISR1 0xbc + #define REG_PAD_CTRL2 0x00C4 #define BIT_RSM_EN_V1 BIT(16) #define BIT_NO_PDN_CHIPOFF_V1 BIT(17) @@ -185,6 +199,15 @@ #define MAC_TRX_ENABLE (BIT_HCI_TXDMA_EN | BIT_HCI_RXDMA_EN | BIT_TXDMA_EN | \ BIT_RXDMA_EN | BIT_PROTOCOL_EN | BIT_SCHEDULE_EN | \ BIT_MACTXEN | BIT_MACRXEN) +#define REG_PBP 0x104 +#define PBP_RX_MASK 0x0f +#define PBP_TX_MASK 0xf0 +#define PBP_64 0x0 +#define PBP_128 0x1 +#define PBP_256 0x2 +#define PBP_512 0x3 +#define PBP_1024 0x4 + #define BIT_SHIFT_TXDMA_VOQ_MAP 4 #define BIT_MASK_TXDMA_VOQ_MAP 0x3 #define BIT_TXDMA_VOQ_MAP(x) \ @@ -256,6 +279,8 @@ #define REG_HMEBOX1 0x01D4 #define REG_HMEBOX2 0x01D8 #define REG_HMEBOX3 0x01DC +#define REG_LLT_INIT 0x01E0 +#define BIT_LLT_WRITE_ACCESS BIT(30) #define REG_HMEBOX0_EX 0x01F0 #define REG_HMEBOX1_EX 0x01F4 #define REG_HMEBOX2_EX 0x01F8 @@ -298,6 +323,7 @@ #define REG_AUTO_LLT 0x0224 #define BIT_AUTO_INIT_LLT BIT(16) +#define REG_DWBCN1_CTRL 0x0228 #define REG_RQPN_CTRL_1 0x0228 #define REG_RQPN_CTRL_2 0x022C #define BIT_LD_RQPN BIT(31) @@ -329,6 +355,7 @@ #define BIT_DMA_BURST_SIZE_1024 0 #define REG_RXPKTNUM 0x02B0 +#define REG_EARLY_MODE_CONTROL 0x02BC #define REG_INT_MIG 0x0304 #define REG_HCI_MIX_CFG 0x03FC @@ -336,6 +363,7 @@ #define REG_BCNQ_INFO 0x0418 #define BIT_MGQ_CPU_EMPTY BIT(24) +#define REG_TXPKT_EMPTY 0x041A #define REG_FWHW_TXQ_CTRL 0x0420 #define BIT_EN_BCNQ_DL BIT(22) #define BIT_EN_WR_FREE_TAIL BIT(20) @@ -362,10 +390,12 @@ #define REG_AMPDU_MAX_TIME_V1 0x0455 #define REG_BCNQ1_BDNY_V1 0x0456 #define REG_AMPDU_MAX_TIME 0x0456 +#define REG_AMPDU_MAX_LENGTH 0x0458 #define REG_WMAC_LBK_BF_HD 0x045D 
#define REG_TX_HANG_CTRL 0x045E #define BIT_EN_GNT_BT_AWAKE BIT(3) #define BIT_EN_EOF_V1 BIT(2) +#define REG_FAST_EDCA_CTRL 0x0460 #define REG_DATA_SC 0x0483 #define REG_ARFR2_V1 0x048C #define REG_ARFRH2_V1 0x0490 @@ -390,6 +420,8 @@ #define REG_PRECNT_CTRL 0x04E5 #define BIT_BTCCA_CTRL (BIT(0) | BIT(1)) #define BIT_EN_PRECNT BIT(11) +#define REG_TX_RPT_CTRL 0x04EC +#define REG_TX_RPT_TIME 0x04F0 #define REG_DUMMY_PAGE4_V1 0x04FC #define REG_EDCA_VO_PARAM 0x0500 @@ -400,6 +432,7 @@ #define BIT_MASK_CWMAX GENMASK(15, 12) #define BIT_MASK_CWMIN GENMASK(11, 8) #define BIT_MASK_AIFS GENMASK(7, 0) +#define REG_BCNTCFG 0x0510 #define REG_PIFS 0x0512 #define REG_SIFS 0x0514 #define BIT_SHIFT_SIFS_OFDM_CTX 8 @@ -526,6 +559,8 @@ #define REG_BT_COEX_V2 0x0762 #define BIT_GNT_BT_POLARITY BIT(12) #define BIT_LTE_COEX_EN BIT(7) +#define REG_GNT_BT 0x0765 +#define BIT_PTA_SW_CTL GENMASK(4, 3) #define REG_BT_COEX_ENH_INTR_CTRL 0x76E #define BIT_R_GRANTALL_WLMASK BIT(3) #define BIT_STATIS_BT_EN BIT(2) @@ -543,14 +578,43 @@ #define REG_FPGA0_RFMOD 0x0800 #define BIT_CCKEN BIT(24) #define BIT_OFDMEN BIT(25) +#define REG_CCK_RPT_FORMAT 0x0804 +#define BIT_CCK_RPT_FORMAT BIT(16) +#define REG_RXPSEL 0x0808 +#define BIT_RX_PSEL_RST (BIT(28) | BIT(29)) +#define REG_TXPSEL 0x080C #define REG_RX_GAIN_EN 0x081c +#define REG_CCASEL 0x082C +#define REG_PDMFTH 0x0830 +#define REG_BWINDICATION 0x0834 +#define REG_CCA2ND 0x0838 +#define REG_L1PKTH 0x0848 +#define REG_CLKTRK 0x0860 +#define REG_ADCCLK 0x08AC +#define REG_HSSI_READ 0x08B0 +#define REG_FPGA0_XCD_RF_PARA 0x08B4 +#define REG_RX_MCS_LIMIT 0x08BC +#define REG_ADC160 0x08C4 +#define REG_ANTSEL_SW 0x0900 +#define REG_DAC_RSTB 0x090c +#define REG_SINGLE_TONE_CONT_TX 0x0914 #define REG_RFE_CTRL_E 0x0974 #define REG_2ND_CCA_CTRL 0x0976 +#define REG_IQK_COM00 0x0978 +#define REG_IQK_COM32 0x097c +#define REG_IQK_COM64 0x0980 +#define REG_IQK_COM96 0x0984 + +#define REG_FAS 0x09a4 +#define REG_RXSB 0x0a00 +#define REG_CCK_RX 0x0a04 +#define REG_CCK_PD_TH 0x0a0a #define REG_CCK0_FAREPORT 0xa2c #define BIT_CCK0_2RX BIT(18) #define BIT_CCK0_MRC BIT(22) +#define REG_FA_CCK 0x0a5c #define REG_DIS_DPD 0x0a70 #define DIS_DPD_MASK GENMASK(9, 0) @@ -566,13 +630,109 @@ #define DIS_DPD_RATEVHT2SS_MCS1 BIT(9) #define DIS_DPD_RATEALL GENMASK(9, 0) +#define REG_CNTRST 0x0b58 + +#define REG_3WIRE_SWA 0x0c00 +#define REG_RX_IQC_AB_A 0x0c10 +#define REG_TXSCALE_A 0x0c1c +#define BB_SWING_MASK GENMASK(31, 21) +#define REG_TX_AGC_A_CCK_11_CCK_1 0xc20 +#define REG_TX_AGC_A_OFDM18_OFDM6 0xc24 +#define REG_TX_AGC_A_OFDM54_OFDM24 0xc28 +#define REG_TX_AGC_A_MCS3_MCS0 0xc2c +#define REG_TX_AGC_A_MCS7_MCS4 0xc30 +#define REG_TX_AGC_A_MCS11_MCS8 0xc34 +#define REG_TX_AGC_A_MCS15_MCS12 0xc38 +#define REG_TX_AGC_A_NSS1_INDEX3_NSS1_INDEX0 0xc3c +#define REG_TX_AGC_A_NSS1_INDEX7_NSS1_INDEX4 0xc40 +#define REG_TX_AGC_A_NSS2_INDEX1_NSS1_INDEX8 0xc44 +#define REG_TX_AGC_A_NSS2_INDEX5_NSS2_INDEX2 0xc48 +#define REG_TX_AGC_A_NSS2_INDEX9_NSS2_INDEX6 0xc4c +#define REG_RXIGI_A 0x0c50 +#define REG_TX_PWR_TRAINING_A 0x0c54 +#define REG_CK_MONHA 0x0c5c +#define REG_AFE_PWR1_A 0x0c60 +#define REG_AFE_PWR2_A 0x0c64 +#define REG_RX_WAIT_CCA_TX_CCK_RFON_A 0x0c68 +#define REG_OFDM0_XA_TX_IQ_IMBALANCE 0x0c80 +#define REG_OFDM0_A_TX_AFE 0x0c84 +#define REG_OFDM0_XB_TX_IQ_IMBALANCE 0x0c88 +#define REG_TSSI_TRK_SW 0x0c8c +#define REG_LSSI_WRITE_A 0x0c90 +#define REG_PREDISTA 0x0c90 +#define REG_TXAGCIDX 0x0c94 + +#define REG_RFE_PINMUX_A 0x0cb0 +#define REG_RFE_INV_A 0x0cb4 #define REG_RFE_CTRL8 0x0cb4 #define 
BIT_MASK_RFE_SEL89 GENMASK(7, 0) +#define PTA_CTRL_PIN 0x66 +#define DPDT_CTRL_PIN 0x77 +#define RFE_INV_MASK 0x3ff00000 +#define REG_RFECTL_A 0x0cb8 #define REG_RFE_INV8 0x0cbd #define BIT_MASK_RFE_INV89 GENMASK(1, 0) #define REG_RFE_INV16 0x0cbe #define BIT_RFE_BUF_EN BIT(3) +#define REG_IQK_DPD_CFG 0x0cc4 +#define REG_CFG_PMPD 0x0cc8 +#define REG_IQC_Y 0x0ccc +#define REG_IQC_X 0x0cd4 +#define REG_INTPO_SETA 0x0ce8 + +#define REG_IQKA_END 0x0d00 +#define REG_PI_READ_A 0x0d04 +#define REG_SI_READ_A 0x0d08 +#define REG_IQKB_END 0x0d40 +#define REG_PI_READ_B 0x0d44 +#define REG_SI_READ_B 0x0d48 + +#define REG_3WIRE_SWB 0x0e00 +#define REG_RX_IQC_AB_B 0x0e10 +#define REG_TXSCALE_B 0x0e1c +#define REG_TX_AGC_B_CCK_11_CCK_1 0xe20 +#define REG_TX_AGC_B_OFDM18_OFDM6 0xe24 +#define REG_TX_AGC_B_OFDM54_OFDM24 0xe28 +#define REG_TX_AGC_B_MCS3_MCS0 0xe2c +#define REG_TX_AGC_B_MCS7_MCS4 0xe30 +#define REG_TX_AGC_B_MCS11_MCS8 0xe34 +#define REG_TX_AGC_B_MCS15_MCS12 0xe38 +#define REG_TX_AGC_B_NSS1_INDEX3_NSS1_INDEX0 0xe3c +#define REG_TX_AGC_B_NSS1_INDEX7_NSS1_INDEX4 0xe40 +#define REG_TX_AGC_B_NSS2_INDEX1_NSS1_INDEX8 0xe44 +#define REG_TX_AGC_B_NSS2_INDEX5_NSS2_INDEX2 0xe48 +#define REG_TX_AGC_B_NSS2_INDEX9_NSS2_INDEX6 0xe4c +#define REG_RXIGI_B 0x0e50 +#define REG_TX_PWR_TRAINING_B 0x0e54 +#define REG_CK_MONHB 0x0e5c +#define REG_AFE_PWR1_B 0x0e60 +#define REG_AFE_PWR2_B 0x0e64 +#define REG_RX_WAIT_CCA_TX_CCK_RFON_B 0x0e68 +#define REG_TXTONEB 0x0e80 +#define REG_RXTONEB 0x0e84 +#define REG_TXPITMB 0x0e88 +#define REG_RXPITMB 0x0e8c +#define REG_LSSI_WRITE_B 0x0e90 +#define REG_PREDISTB 0x0e90 +#define REG_INIDLYB 0x0e94 +#define REG_RFE_PINMUX_B 0x0eb0 +#define REG_RFE_INV_B 0x0eb4 +#define REG_RFECTL_B 0x0eb8 +#define REG_BPBDB 0x0ec4 +#define REG_PHYTXONB 0x0ec8 +#define REG_IQKYB 0x0ecc +#define REG_IQKXB 0x0ed4 +#define REG_INTPO_SETB 0x0ee8 + +#define REG_CRC_CCK 0x0f04 +#define REG_CCA_OFDM 0x0f08 +#define REG_CRC_VHT 0x0f0c +#define REG_CRC_HT 0x0f10 +#define REG_CRC_OFDM 0x0f14 +#define REG_FA_OFDM 0x0f48 +#define REG_CCA_CCK 0x0fcc + #define REG_ANAPARSW_MAC_0 0x1010 #define BIT_CF_L_V2 GENMASK(29, 28) @@ -709,6 +869,10 @@ #define REG_IGN_GNTBT4 0x4160 +#define REG_USB_MOD 0xf008 +#define REG_USB3_RXITV 0xf050 +#define REG_USB_HRPWM 0xfe58 + #define RF_MODE 0x00 #define RF_MODOPT 0x01 #define RF_WLINT 0x01 @@ -716,7 +880,13 @@ #define RF_DTXLOK 0x08 #define RF_CFGCH 0x18 #define BIT_BAND GENMASK(18, 16) +#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8)) +#define RF18_CHANNEL_MASK (MASKBYTE0) +#define RF18_RFSI_MASK (BIT(18) | BIT(17)) #define RF_RCK 0x1d +#define RF_MODE_TABLE_ADDR 0x30 +#define RF_MODE_TABLE_DATA0 0x31 +#define RF_MODE_TABLE_DATA1 0x32 #define RF_LUTWA 0x33 #define RF_LUTWD1 0x3e #define RF_LUTWD0 0x3f @@ -725,10 +895,14 @@ #define RF_T_METER 0x42 #define RF_BSPAD 0x54 #define RF_GAINTX 0x56 +#define RF_TXMOD 0x58 #define RF_TXATANK 0x64 +#define RF_TXA_PREPAD 0x65 #define RF_TRXIQ 0x66 #define RF_RXIQGEN 0x8d +#define RF_RXBB2 0x8f #define RF_SYN_PFD 0xb0 +#define RF_LCK 0xb4 #define RF_XTALX2 0xb8 #define RF_SYN_CTRL 0xbb #define RF_MALSEL 0xbe diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c index 77399b8dd8cd..a19b94d022ee 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c @@ -493,11 +493,6 @@ static const struct rtw_pwr_seq_cmd * const card_disable_flow_8703b[] = { NULL }; -static const struct rtw_rfe_def rtw8703b_rfe_defs[] = { - [0] = { 
.phy_pg_tbl = &rtw8703b_bb_pg_tbl, - .txpwr_lmt_tbl = &rtw8703b_txpwr_lmt_tbl,}, -}; - static const struct rtw_page_table page_table_8703b[] = { {12, 2, 2, 0, 1}, {12, 2, 2, 0, 1}, @@ -637,7 +632,7 @@ static void rtw8703b_pwrtrack_init(struct rtw_dev *rtwdev) dm_info->pwr_trk_init_trigger = true; dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; dm_info->txagc_remnant_cck = 0; - dm_info->txagc_remnant_ofdm = 0; + dm_info->txagc_remnant_ofdm[RF_PATH_A] = 0; } static void rtw8703b_phy_set_param(struct rtw_dev *rtwdev) @@ -1589,7 +1584,7 @@ static void rtw8703b_pwrtrack_set_ofdm_pwr(struct rtw_dev *rtwdev, s8 swing_idx, { struct rtw_dm_info *dm_info = &rtwdev->dm_info; - dm_info->txagc_remnant_ofdm = txagc_idx; + dm_info->txagc_remnant_ofdm[RF_PATH_A] = txagc_idx; /* Only path A is calibrated for rtl8703b */ rtw8703b_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_A); @@ -1818,6 +1813,12 @@ static const struct rtw_pwr_track_tbl rtw8703b_rtw_pwr_track_tbl = { .pwrtrk_xtal_p = rtw8703b_pwrtrk_xtal_p, }; +static const struct rtw_rfe_def rtw8703b_rfe_defs[] = { + [0] = { .phy_pg_tbl = &rtw8703b_bb_pg_tbl, + .txpwr_lmt_tbl = &rtw8703b_txpwr_lmt_tbl, + .pwr_track_tbl = &rtw8703b_rtw_pwr_track_tbl, }, +}; + /* Shared-Antenna Coex Table */ static const struct coex_table_para table_sant_8703b[] = { {0xffffffff, 0xffffffff}, /* case-0 */ @@ -1888,6 +1889,8 @@ static const struct coex_tdma_para tdma_sant_8703b[] = { }; static const struct rtw_chip_ops rtw8703b_ops = { + .power_on = rtw_power_on, + .power_off = rtw_power_off, .mac_init = rtw8723x_mac_init, .dump_fw_crash = NULL, .shutdown = NULL, @@ -1960,6 +1963,9 @@ const struct rtw_chip_info rtw8703b_hw_spec = { .max_power_index = 0x3f, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, .usb_tx_agg_desc_num = 1, /* Not sure if this chip has USB interface */ + .hw_feature_report = true, + .c2h_ra_report_size = 7, + .old_datarate_fb_limit = true, .path_div_supported = false, .ht_supported = true, @@ -1992,7 +1998,6 @@ const struct rtw_chip_info rtw8703b_hw_spec = { .rfe_defs_size = ARRAY_SIZE(rtw8703b_rfe_defs), .iqk_threshold = 8, - .pwr_track_tbl = &rtw8703b_rtw_pwr_track_tbl, /* WOWLAN firmware exists, but not implemented yet */ .wow_fw_name = "rtw88/rtw8703b_wow_fw.bin", diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index 86a5e2497641..eeca31bf71f1 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -79,7 +79,7 @@ static void rtw8723d_pwrtrack_init(struct rtw_dev *rtwdev) dm_info->pwr_trk_init_trigger = true; dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; dm_info->txagc_remnant_cck = 0; - dm_info->txagc_remnant_ofdm = 0; + dm_info->txagc_remnant_ofdm[RF_PATH_A] = 0; } static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev) @@ -1265,7 +1265,7 @@ static void rtw8723d_pwrtrack_set_ofdm_pwr(struct rtw_dev *rtwdev, s8 swing_idx, { struct rtw_dm_info *dm_info = &rtwdev->dm_info; - dm_info->txagc_remnant_ofdm = txagc_idx; + dm_info->txagc_remnant_ofdm[RF_PATH_A] = txagc_idx; rtw8723d_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_A); rtw8723d_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_B); @@ -1390,6 +1390,8 @@ static void rtw8723d_pwr_track(struct rtw_dev *rtwdev) } static const struct rtw_chip_ops rtw8723d_ops = { + .power_on = rtw_power_on, + .power_off = rtw_power_off, .phy_set_param = rtw8723d_phy_set_param, .read_efuse = rtw8723x_read_efuse, .query_phy_status = query_phy_status, @@ -2018,11 +2020,6 @@ static const struct 
rtw_intf_phy_para_table phy_para_table_8723d = { .n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8723d), }; -static const struct rtw_rfe_def rtw8723d_rfe_defs[] = { - [0] = { .phy_pg_tbl = &rtw8723d_bb_pg_tbl, - .txpwr_lmt_tbl = &rtw8723d_txpwr_lmt_tbl,}, -}; - static const u8 rtw8723d_pwrtrk_2gb_n[] = { 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 10, 10 @@ -2086,6 +2083,12 @@ static const struct rtw_pwr_track_tbl rtw8723d_rtw_pwr_track_tbl = { .pwrtrk_xtal_n = rtw8723d_pwrtrk_xtal_n, }; +static const struct rtw_rfe_def rtw8723d_rfe_defs[] = { + [0] = { .phy_pg_tbl = &rtw8723d_bb_pg_tbl, + .txpwr_lmt_tbl = &rtw8723d_txpwr_lmt_tbl, + .pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl, }, +}; + static const struct rtw_reg_domain coex_info_hw_regs_8723d[] = { {0x948, MASKDWORD, RTW_REG_DOMAIN_MAC32}, {0x67, BIT(7), RTW_REG_DOMAIN_MAC8}, @@ -2131,6 +2134,9 @@ const struct rtw_chip_info rtw8723d_hw_spec = { .page_size = TX_PAGE_SIZE, .dig_min = 0x20, .usb_tx_agg_desc_num = 1, + .hw_feature_report = true, + .c2h_ra_report_size = 7, + .old_datarate_fb_limit = true, .ht_supported = true, .vht_supported = false, .lps_deep_mode_supported = 0, @@ -2154,7 +2160,6 @@ const struct rtw_chip_info rtw8723d_hw_spec = { .rfe_defs = rtw8723d_rfe_defs, .rfe_defs_size = ARRAY_SIZE(rtw8723d_rfe_defs), .rx_ldpc = false, - .pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl, .iqk_threshold = 8, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, .max_scan_ie_len = IEEE80211_MAX_DATA_LEN, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.c b/drivers/net/wireless/realtek/rtw88/rtw8723x.c index 0d0b6c2cb9aa..69f73cb5b4cd 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723x.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.c @@ -595,7 +595,8 @@ void __rtw8723x_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path, u8 delta) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; - const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl; + const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev); + const struct rtw_pwr_track_tbl *tbl = rfe_def->pwr_track_tbl; const s8 *pwrtrk_xtal; s8 xtal_cap; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a.c b/drivers/net/wireless/realtek/rtw88/rtw8812a.c new file mode 100644 index 000000000000..482edd31823d --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8812a.c @@ -0,0 +1,1102 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "main.h" +#include "coex.h" +#include "phy.h" +#include "reg.h" +#include "rtw88xxa.h" +#include "rtw8812a.h" +#include "rtw8812a_table.h" +#include "tx.h" + +static void rtw8812a_power_off(struct rtw_dev *rtwdev) +{ + rtw88xxa_power_off(rtwdev, enter_lps_flow_8812a); +} + +static s8 rtw8812a_cck_rx_pwr(u8 lna_idx, u8 vga_idx) +{ + s8 rx_pwr_all = 0; + + switch (lna_idx) { + case 7: + if (vga_idx <= 27) + rx_pwr_all = -94 + 2 * (27 - vga_idx); + else + rx_pwr_all = -94; + break; + case 6: + rx_pwr_all = -42 + 2 * (2 - vga_idx); + break; + case 5: + rx_pwr_all = -36 + 2 * (7 - vga_idx); + break; + case 4: + rx_pwr_all = -30 + 2 * (7 - vga_idx); + break; + case 3: + rx_pwr_all = -18 + 2 * (7 - vga_idx); + break; + case 2: + rx_pwr_all = 2 * (5 - vga_idx); + break; + case 1: + rx_pwr_all = 14 - 2 * vga_idx; + break; + case 0: + rx_pwr_all = 20 - 2 * vga_idx; + break; + default: + break; + } + + return rx_pwr_all; +} + +static void rtw8812a_query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, + struct rtw_rx_pkt_stat *pkt_stat) +{ + 
rtw88xxa_query_phy_status(rtwdev, phy_status, pkt_stat, + rtw8812a_cck_rx_pwr); + + if (pkt_stat->rate >= DESC_RATE6M) + return; + + if (rtwdev->hal.cck_high_power) + return; + + if (pkt_stat->rssi >= 80) + pkt_stat->rssi = ((pkt_stat->rssi - 80) << 1) + + ((pkt_stat->rssi - 80) >> 1) + 80; + else if (pkt_stat->rssi <= 78 && pkt_stat->rssi >= 20) + pkt_stat->rssi += 3; +} + +static void rtw8812a_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) +{ +} + +static void rtw8812a_do_lck(struct rtw_dev *rtwdev) +{ + u32 cont_tx, lc_cal, i; + + cont_tx = rtw_read32_mask(rtwdev, REG_SINGLE_TONE_CONT_TX, 0x70000); + + lc_cal = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK); + + if (!cont_tx) + rtw_write8(rtwdev, REG_TXPAUSE, 0xff); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_LCK, BIT(14), 1); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, 0x08000, 1); + + mdelay(150); + + for (i = 0; i < 5; i++) { + if (rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, 0x08000) != 1) + break; + + mdelay(10); + } + + if (i == 5) + rtw_dbg(rtwdev, RTW_DBG_RFK, "LCK timed out\n"); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_LCK, BIT(14), 0); + + if (!cont_tx) + rtw_write8(rtwdev, REG_TXPAUSE, 0); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal); +} + +static void rtw8812a_iqk_backup_rf(struct rtw_dev *rtwdev, u32 *rfa_backup, + u32 *rfb_backup, const u32 *backup_rf_reg, + u32 rf_num) +{ + u32 i; + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + /* Save RF Parameters */ + for (i = 0; i < rf_num; i++) { + rfa_backup[i] = rtw_read_rf(rtwdev, RF_PATH_A, + backup_rf_reg[i], MASKDWORD); + rfb_backup[i] = rtw_read_rf(rtwdev, RF_PATH_B, + backup_rf_reg[i], MASKDWORD); + } +} + +static void rtw8812a_iqk_restore_rf(struct rtw_dev *rtwdev, + enum rtw_rf_path path, + const u32 *backup_rf_reg, + u32 *RF_backup, u32 rf_reg_num) +{ + u32 i; + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + for (i = 0; i < rf_reg_num; i++) + rtw_write_rf(rtwdev, path, backup_rf_reg[i], + RFREG_MASK, RF_backup[i]); + + rtw_write_rf(rtwdev, path, RF_LUTWE, RFREG_MASK, 0); +} + +static void rtw8812a_iqk_restore_afe(struct rtw_dev *rtwdev, u32 *afe_backup, + const u32 *backup_afe_reg, u32 afe_num) +{ + u32 i; + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + /* Reload AFE Parameters */ + for (i = 0; i < afe_num; i++) + rtw_write32(rtwdev, backup_afe_reg[i], afe_backup[i]); + + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + rtw_write32(rtwdev, REG_OFDM0_XA_TX_IQ_IMBALANCE, 0x0); + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x0); + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, 0x0); + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x3c000000); + rtw_write32_mask(rtwdev, REG_LSSI_WRITE_A, BIT(7), 1); + rtw_write32_mask(rtwdev, REG_IQK_DPD_CFG, BIT(18), 1); + rtw_write32_mask(rtwdev, REG_IQK_DPD_CFG, BIT(29), 1); + rtw_write32_mask(rtwdev, REG_CFG_PMPD, BIT(29), 1); + + rtw_write32(rtwdev, REG_TXTONEB, 0x0); + rtw_write32(rtwdev, REG_RXTONEB, 0x0); + rtw_write32(rtwdev, REG_TXPITMB, 0x0); + rtw_write32(rtwdev, REG_RXPITMB, 0x3c000000); + rtw_write32_mask(rtwdev, REG_LSSI_WRITE_B, BIT(7), 1); + rtw_write32_mask(rtwdev, REG_BPBDB, BIT(18), 1); + rtw_write32_mask(rtwdev, REG_BPBDB, BIT(29), 1); + rtw_write32_mask(rtwdev, REG_PHYTXONB, BIT(29), 1); +} + +static void rtw8812a_iqk_rx_fill(struct rtw_dev *rtwdev, enum rtw_rf_path path, + unsigned int rx_x, 
unsigned int rx_y) +{ + switch (path) { + case RF_PATH_A: + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + if (rx_x >> 1 >= 0x112 || + (rx_y >> 1 >= 0x12 && rx_y >> 1 <= 0x3ee)) { + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_A, + 0x000003ff, 0x100); + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_A, + 0x03ff0000, 0); + } else { + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_A, + 0x000003ff, rx_x >> 1); + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_A, + 0x03ff0000, rx_y >> 1); + } + rtw_dbg(rtwdev, RTW_DBG_RFK, + "rx_x = %x;;rx_y = %x ====>fill to IQC\n", + rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff); + rtw_dbg(rtwdev, RTW_DBG_RFK, "0xc10 = %x ====>fill to IQC\n", + rtw_read32(rtwdev, REG_RX_IQC_AB_A)); + break; + case RF_PATH_B: + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + if (rx_x >> 1 >= 0x112 || + (rx_y >> 1 >= 0x12 && rx_y >> 1 <= 0x3ee)) { + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_B, + 0x000003ff, 0x100); + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_B, + 0x03ff0000, 0); + } else { + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_B, + 0x000003ff, rx_x >> 1); + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_B, + 0x03ff0000, rx_y >> 1); + } + rtw_dbg(rtwdev, RTW_DBG_RFK, + "rx_x = %x;;rx_y = %x ====>fill to IQC\n", + rx_x >> 1 & 0x000003ff, rx_y >> 1 & 0x000003ff); + rtw_dbg(rtwdev, RTW_DBG_RFK, "0xe10 = %x====>fill to IQC\n", + rtw_read32(rtwdev, REG_RX_IQC_AB_B)); + break; + default: + break; + } +} + +static void rtw8812a_iqk_tx_fill(struct rtw_dev *rtwdev, enum rtw_rf_path path, + unsigned int tx_x, unsigned int tx_y) +{ + switch (path) { + case RF_PATH_A: + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + rtw_write32_mask(rtwdev, REG_PREDISTA, BIT(7), 0x1); + rtw_write32_mask(rtwdev, REG_IQK_DPD_CFG, BIT(18), 0x1); + rtw_write32_mask(rtwdev, REG_IQK_DPD_CFG, BIT(29), 0x1); + rtw_write32_mask(rtwdev, REG_CFG_PMPD, BIT(29), 0x1); + rtw_write32_mask(rtwdev, REG_IQC_Y, 0x000007ff, tx_y); + rtw_write32_mask(rtwdev, REG_IQC_X, 0x000007ff, tx_x); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "tx_x = %x;;tx_y = %x =====> fill to IQC\n", + tx_x & 0x000007ff, tx_y & 0x000007ff); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "0xcd4 = %x;;0xccc = %x ====>fill to IQC\n", + rtw_read32_mask(rtwdev, REG_IQC_X, 0x000007ff), + rtw_read32_mask(rtwdev, REG_IQC_Y, 0x000007ff)); + break; + case RF_PATH_B: + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + rtw_write32_mask(rtwdev, REG_PREDISTB, BIT(7), 0x1); + rtw_write32_mask(rtwdev, REG_BPBDB, BIT(18), 0x1); + rtw_write32_mask(rtwdev, REG_BPBDB, BIT(29), 0x1); + rtw_write32_mask(rtwdev, REG_PHYTXONB, BIT(29), 0x1); + rtw_write32_mask(rtwdev, REG_IQKYB, 0x000007ff, tx_y); + rtw_write32_mask(rtwdev, REG_IQKXB, 0x000007ff, tx_x); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "tx_x = %x;;tx_y = %x =====> fill to IQC\n", + tx_x & 0x000007ff, tx_y & 0x000007ff); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "0xed4 = %x;;0xecc = %x ====>fill to IQC\n", + rtw_read32_mask(rtwdev, REG_IQKXB, 0x000007ff), + rtw_read32_mask(rtwdev, REG_IQKYB, 0x000007ff)); + break; + default: + break; + } +} + +static void rtw8812a_iqk(struct rtw_dev *rtwdev) +{ + int tx_x0_temp[10], tx_y0_temp[10], tx_x1_temp[10], tx_y1_temp[10]; + int rx_x0_temp[10], rx_y0_temp[10], rx_x1_temp[10], rx_y1_temp[10]; + bool iqk0_ready = false, tx0_finish = false, rx0_finish = false; + bool iqk1_ready = false, tx1_finish = false, rx1_finish = false; + u8 tx0_avg = 0, tx1_avg = 0, rx0_avg = 0, rx1_avg = 0; + int tx_x0 = 0, tx_y0 = 0, tx_x1 = 0, tx_y1 
= 0; + int rx_x0 = 0, rx_y0 = 0, rx_x1 = 0, rx_y1 = 0; + struct rtw_efuse *efuse = &rtwdev->efuse; + bool tx0_fail = true, rx0_fail = true; + bool tx1_fail = true, rx1_fail = true; + u8 cal0_retry, cal1_retry; + u8 delay_count; + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + /* ========path-A AFE all on======== */ + /* Port 0 DAC/ADC on */ + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x77777777); + rtw_write32(rtwdev, REG_AFE_PWR2_A, 0x77777777); + + /* Port 1 DAC/ADC on */ + rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x77777777); + rtw_write32(rtwdev, REG_AFE_PWR2_B, 0x77777777); + + rtw_write32(rtwdev, REG_RX_WAIT_CCA_TX_CCK_RFON_A, 0x19791979); + rtw_write32(rtwdev, REG_RX_WAIT_CCA_TX_CCK_RFON_B, 0x19791979); + + /* hardware 3-wire off */ + rtw_write32_mask(rtwdev, REG_3WIRE_SWA, 0xf, 0x4); + rtw_write32_mask(rtwdev, REG_3WIRE_SWB, 0xf, 0x4); + + /* DAC/ADC sampling rate (160 MHz) */ + rtw_write32_mask(rtwdev, REG_CK_MONHA, GENMASK(26, 24), 0x7); + rtw_write32_mask(rtwdev, REG_CK_MONHB, GENMASK(26, 24), 0x7); + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + /* ====== path A TX IQK RF setting ====== */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80002); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_ADDR, RFREG_MASK, 0x20000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA0, RFREG_MASK, 0x3fffd); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA1, RFREG_MASK, 0xfe83f); + rtw_write_rf(rtwdev, RF_PATH_A, RF_TXA_PREPAD, RFREG_MASK, 0x931d5); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RXBB2, RFREG_MASK, 0x8a001); + + /* ====== path B TX IQK RF setting ====== */ + rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE, RFREG_MASK, 0x80002); + rtw_write_rf(rtwdev, RF_PATH_B, RF_MODE_TABLE_ADDR, RFREG_MASK, 0x20000); + rtw_write_rf(rtwdev, RF_PATH_B, RF_MODE_TABLE_DATA0, RFREG_MASK, 0x3fffd); + rtw_write_rf(rtwdev, RF_PATH_B, RF_MODE_TABLE_DATA1, RFREG_MASK, 0xfe83f); + rtw_write_rf(rtwdev, RF_PATH_B, RF_TXA_PREPAD, RFREG_MASK, 0x931d5); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RXBB2, RFREG_MASK, 0x8a001); + + rtw_write32(rtwdev, REG_DAC_RSTB, 0x00008000); + rtw_write32_mask(rtwdev, REG_TXAGCIDX, BIT(0), 0x1); + rtw_write32_mask(rtwdev, REG_INIDLYB, BIT(0), 0x1); + rtw_write32(rtwdev, REG_IQK_COM00, 0x29002000); /* TX (X,Y) */ + rtw_write32(rtwdev, REG_IQK_COM32, 0xa9002000); /* RX (X,Y) */ + rtw_write32(rtwdev, REG_IQK_COM96, 0x00462910); /* [0]:AGC_en, [15]:idac_K_Mask */ + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + if (efuse->ext_pa_5g) { + if (efuse->rfe_option == 1) { + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, 0x821403e3); + rtw_write32(rtwdev, REG_TXPITMB, 0x821403e3); + } else { + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, 0x821403f7); + rtw_write32(rtwdev, REG_TXPITMB, 0x821403f7); + } + } else { + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, 0x821403f1); + rtw_write32(rtwdev, REG_TXPITMB, 0x821403f1); + } + + if (rtwdev->hal.current_band_type == RTW_BAND_5G) { + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x68163e96); + rtw_write32(rtwdev, REG_RXPITMB, 0x68163e96); + } else { + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x28163e96); + rtw_write32(rtwdev, REG_RXPITMB, 0x28163e96); + + if (efuse->rfe_option == 3) { + if (efuse->ext_pa_2g) + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, + 0x821403e3); + else + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, + 0x821403f7); + } + } + + /* TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtw_write32(rtwdev, 
REG_OFDM0_XA_TX_IQ_IMBALANCE, 0x18008c10); + /* RX_Tone_idx[9:0], RxK_Mask[29] */ + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x38008c10); + rtw_write32(rtwdev, REG_INTPO_SETA, 0x00000000); + /* TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtw_write32(rtwdev, REG_TXTONEB, 0x18008c10); + /* RX_Tone_idx[9:0], RxK_Mask[29] */ + rtw_write32(rtwdev, REG_RXTONEB, 0x38008c10); + rtw_write32(rtwdev, REG_INTPO_SETB, 0x00000000); + + cal0_retry = 0; + cal1_retry = 0; + while (1) { + /* one shot */ + rtw_write32(rtwdev, REG_RFECTL_A, 0x00100000); + rtw_write32(rtwdev, REG_RFECTL_B, 0x00100000); + rtw_write32(rtwdev, REG_IQK_COM64, 0xfa000000); + rtw_write32(rtwdev, REG_IQK_COM64, 0xf8000000); + + mdelay(10); + + rtw_write32(rtwdev, REG_RFECTL_A, 0x00000000); + rtw_write32(rtwdev, REG_RFECTL_B, 0x00000000); + + for (delay_count = 0; delay_count < 20; delay_count++) { + if (!tx0_finish) + iqk0_ready = rtw_read32_mask(rtwdev, + REG_IQKA_END, + BIT(10)); + if (!tx1_finish) + iqk1_ready = rtw_read32_mask(rtwdev, + REG_IQKB_END, + BIT(10)); + if (iqk0_ready && iqk1_ready) + break; + + mdelay(1); + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "TX delay_count = %d\n", + delay_count); + + if (delay_count < 20) { /* If 20ms No Result, then cal_retry++ */ + /* ============TXIQK Check============== */ + tx0_fail = rtw_read32_mask(rtwdev, REG_IQKA_END, BIT(12)); + tx1_fail = rtw_read32_mask(rtwdev, REG_IQKB_END, BIT(12)); + + if (!(tx0_fail || tx0_finish)) { + rtw_write32(rtwdev, REG_RFECTL_A, 0x02000000); + tx_x0_temp[tx0_avg] = rtw_read32_mask(rtwdev, + REG_IQKA_END, + 0x07ff0000); + rtw_write32(rtwdev, REG_RFECTL_A, 0x04000000); + tx_y0_temp[tx0_avg] = rtw_read32_mask(rtwdev, + REG_IQKA_END, + 0x07ff0000); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "tx_x0[%d] = %x ;; tx_y0[%d] = %x\n", + tx0_avg, tx_x0_temp[tx0_avg], + tx0_avg, tx_y0_temp[tx0_avg]); + + tx_x0_temp[tx0_avg] <<= 21; + tx_y0_temp[tx0_avg] <<= 21; + + tx0_avg++; + } else { + cal0_retry++; + if (cal0_retry == 10) + break; + } + + if (!(tx1_fail || tx1_finish)) { + rtw_write32(rtwdev, REG_RFECTL_B, 0x02000000); + tx_x1_temp[tx1_avg] = rtw_read32_mask(rtwdev, + REG_IQKB_END, + 0x07ff0000); + rtw_write32(rtwdev, REG_RFECTL_B, 0x04000000); + tx_y1_temp[tx1_avg] = rtw_read32_mask(rtwdev, + REG_IQKB_END, + 0x07ff0000); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "tx_x1[%d] = %x ;; tx_y1[%d] = %x\n", + tx1_avg, tx_x1_temp[tx1_avg], + tx1_avg, tx_y1_temp[tx1_avg]); + + tx_x1_temp[tx1_avg] <<= 21; + tx_y1_temp[tx1_avg] <<= 21; + + tx1_avg++; + } else { + cal1_retry++; + if (cal1_retry == 10) + break; + } + } else { + cal0_retry++; + cal1_retry++; + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "delay 20ms TX IQK Not Ready!!!!!\n"); + + if (cal0_retry == 10) + break; + } + + if (tx0_avg >= 2) + tx0_finish = rtw88xxa_iqk_finish(tx0_avg, 4, + tx_x0_temp, tx_y0_temp, &tx_x0, &tx_y0, + false, false); + + if (tx1_avg >= 2) + tx1_finish = rtw88xxa_iqk_finish(tx1_avg, 4, + tx_x1_temp, tx_y1_temp, &tx_x1, &tx_y1, + false, false); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "tx0_average = %d, tx1_average = %d\n", + tx0_avg, tx1_avg); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "tx0_finish = %d, tx1_finish = %d\n", + tx0_finish, tx1_finish); + + if (tx0_finish && tx1_finish) + break; + + if ((cal0_retry + tx0_avg) >= 10 || + (cal1_retry + tx1_avg) >= 10) + break; + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "TXA_cal_retry = %d\n", cal0_retry); + rtw_dbg(rtwdev, RTW_DBG_RFK, "TXB_cal_retry = %d\n", cal1_retry); + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + /* Load LOK */ + rtw_write_rf(rtwdev, 
RF_PATH_A, RF_TXMOD, 0x7fe00, + rtw_read_rf(rtwdev, RF_PATH_A, RF_DTXLOK, 0xffc00)); + rtw_write_rf(rtwdev, RF_PATH_B, RF_TXMOD, 0x7fe00, + rtw_read_rf(rtwdev, RF_PATH_B, RF_DTXLOK, 0xffc00)); + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + if (tx0_finish) { + /* ====== path A RX IQK RF setting====== */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_ADDR, RFREG_MASK, + 0x30000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA0, RFREG_MASK, + 0x3f7ff); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA1, RFREG_MASK, + 0xfe7bf); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RXBB2, RFREG_MASK, 0x88001); + rtw_write_rf(rtwdev, RF_PATH_A, RF_TXA_PREPAD, RFREG_MASK, 0x931d1); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x00000); + } + if (tx1_finish) { + /* ====== path B RX IQK RF setting====== */ + rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE, RFREG_MASK, 0x80000); + rtw_write_rf(rtwdev, RF_PATH_B, RF_MODE_TABLE_ADDR, RFREG_MASK, + 0x30000); + rtw_write_rf(rtwdev, RF_PATH_B, RF_MODE_TABLE_DATA0, RFREG_MASK, + 0x3f7ff); + rtw_write_rf(rtwdev, RF_PATH_B, RF_MODE_TABLE_DATA1, RFREG_MASK, + 0xfe7bf); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RXBB2, RFREG_MASK, 0x88001); + rtw_write_rf(rtwdev, RF_PATH_B, RF_TXA_PREPAD, RFREG_MASK, 0x931d1); + rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE, RFREG_MASK, 0x00000); + } + + rtw_write32_mask(rtwdev, REG_IQK_COM00, BIT(31), 0x1); + rtw_write32_mask(rtwdev, REG_IQK_COM00, BIT(31), 0x0); + rtw_write32(rtwdev, REG_DAC_RSTB, 0x00008000); + + if (rtwdev->hci.type == RTW_HCI_TYPE_PCIE) + rtw_write32(rtwdev, REG_IQK_COM96, 0x0046a911); + else + rtw_write32(rtwdev, REG_IQK_COM96, 0x0046a890); + + if (efuse->rfe_option == 1) { + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777717); + rtw_write32(rtwdev, REG_RFE_INV_A, 0x00000077); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777717); + rtw_write32(rtwdev, REG_RFE_INV_B, 0x00000077); + } else { + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777717); + rtw_write32(rtwdev, REG_RFE_INV_A, 0x02000077); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777717); + rtw_write32(rtwdev, REG_RFE_INV_B, 0x02000077); + } + + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + if (tx0_finish) { + /* TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtw_write32(rtwdev, REG_OFDM0_XA_TX_IQ_IMBALANCE, 0x38008c10); + /* RX_Tone_idx[9:0], RxK_Mask[29] */ + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x18008c10); + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, 0x82140119); + } + if (tx1_finish) { + /* TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtw_write32(rtwdev, REG_TXTONEB, 0x38008c10); + /* RX_Tone_idx[9:0], RxK_Mask[29] */ + rtw_write32(rtwdev, REG_RXTONEB, 0x18008c10); + rtw_write32(rtwdev, REG_TXPITMB, 0x82140119); + } + + cal0_retry = 0; + cal1_retry = 0; + while (1) { + /* one shot */ + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + if (tx0_finish) { + rtw_write32_mask(rtwdev, REG_IQK_COM00, 0x03FF8000, + tx_x0 & 0x000007ff); + rtw_write32_mask(rtwdev, REG_IQK_COM00, 0x000007FF, + tx_y0 & 0x000007ff); + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + if (efuse->rfe_option == 1) + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x28161500); + else + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x28160cc0); + rtw_write32(rtwdev, REG_RFECTL_A, 0x00300000); + 
rtw_write32(rtwdev, REG_RFECTL_A, 0x00100000); + mdelay(5); + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x3c000000); + rtw_write32(rtwdev, REG_RFECTL_A, 0x00000000); + } + + if (tx1_finish) { + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + rtw_write32_mask(rtwdev, REG_IQK_COM00, 0x03FF8000, + tx_x1 & 0x000007ff); + rtw_write32_mask(rtwdev, REG_IQK_COM00, 0x000007FF, + tx_y1 & 0x000007ff); + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + if (efuse->rfe_option == 1) + rtw_write32(rtwdev, REG_RXPITMB, 0x28161500); + else + rtw_write32(rtwdev, REG_RXPITMB, 0x28160ca0); + rtw_write32(rtwdev, REG_RFECTL_B, 0x00300000); + rtw_write32(rtwdev, REG_RFECTL_B, 0x00100000); + mdelay(5); + rtw_write32(rtwdev, REG_RXPITMB, 0x3c000000); + rtw_write32(rtwdev, REG_RFECTL_B, 0x00000000); + } + + for (delay_count = 0; delay_count < 20; delay_count++) { + if (!rx0_finish && tx0_finish) + iqk0_ready = rtw_read32_mask(rtwdev, + REG_IQKA_END, + BIT(10)); + if (!rx1_finish && tx1_finish) + iqk1_ready = rtw_read32_mask(rtwdev, + REG_IQKB_END, + BIT(10)); + if (iqk0_ready && iqk1_ready) + break; + + mdelay(1); + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "RX delay_count = %d\n", + delay_count); + + if (delay_count < 20) { /* If 20ms No Result, then cal_retry++ */ + /* ============RXIQK Check============== */ + rx0_fail = rtw_read32_mask(rtwdev, REG_IQKA_END, BIT(11)); + rx1_fail = rtw_read32_mask(rtwdev, REG_IQKB_END, BIT(11)); + + if (!(rx0_fail || rx0_finish) && tx0_finish) { + rtw_write32(rtwdev, REG_RFECTL_A, 0x06000000); + rx_x0_temp[rx0_avg] = rtw_read32_mask(rtwdev, + REG_IQKA_END, + 0x07ff0000); + rtw_write32(rtwdev, REG_RFECTL_A, 0x08000000); + rx_y0_temp[rx0_avg] = rtw_read32_mask(rtwdev, + REG_IQKA_END, + 0x07ff0000); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "rx_x0[%d] = %x ;; rx_y0[%d] = %x\n", + rx0_avg, rx_x0_temp[rx0_avg], + rx0_avg, rx_y0_temp[rx0_avg]); + + rx_x0_temp[rx0_avg] <<= 21; + rx_y0_temp[rx0_avg] <<= 21; + + rx0_avg++; + } else { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "1. RXA_cal_retry = %d\n", cal0_retry); + + cal0_retry++; + if (cal0_retry == 10) + break; + } + + if (!(rx1_fail || rx1_finish) && tx1_finish) { + rtw_write32(rtwdev, REG_RFECTL_B, 0x06000000); + rx_x1_temp[rx1_avg] = rtw_read32_mask(rtwdev, + REG_IQKB_END, + 0x07ff0000); + rtw_write32(rtwdev, REG_RFECTL_B, 0x08000000); + rx_y1_temp[rx1_avg] = rtw_read32_mask(rtwdev, + REG_IQKB_END, + 0x07ff0000); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "rx_x1[%d] = %x ;; rx_y1[%d] = %x\n", + rx1_avg, rx_x1_temp[rx1_avg], + rx1_avg, rx_y1_temp[rx1_avg]); + + rx_x1_temp[rx1_avg] <<= 21; + rx_y1_temp[rx1_avg] <<= 21; + + rx1_avg++; + } else { + cal1_retry++; + if (cal1_retry == 10) + break; + } + } else { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "2. RXA_cal_retry = %d\n", cal0_retry); + + cal0_retry++; + cal1_retry++; + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "delay 20ms RX IQK Not Ready!!!!!\n"); + + if (cal0_retry == 10) + break; + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "3. 
RXA_cal_retry = %d\n", + cal0_retry); + + if (rx0_avg >= 2) + rx0_finish = rtw88xxa_iqk_finish(rx0_avg, 4, + rx_x0_temp, rx_y0_temp, + &rx_x0, &rx_y0, + true, false); + + if (rx1_avg >= 2) + rx1_finish = rtw88xxa_iqk_finish(rx1_avg, 4, + rx_x1_temp, rx_y1_temp, + &rx_x1, &rx_y1, + true, false); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "rx0_average = %d, rx1_average = %d\n", + rx0_avg, rx1_avg); + rtw_dbg(rtwdev, RTW_DBG_RFK, + "rx0_finish = %d, rx1_finish = %d\n", + rx0_finish, rx1_finish); + + if ((rx0_finish || !tx0_finish) && (rx1_finish || !tx1_finish)) + break; + + if ((cal0_retry + rx0_avg) >= 10 || + (cal1_retry + rx1_avg) >= 10 || + rx0_avg == 3 || rx1_avg == 3) + break; + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "RXA_cal_retry = %d\n", cal0_retry); + rtw_dbg(rtwdev, RTW_DBG_RFK, "RXB_cal_retry = %d\n", cal1_retry); + + /* FillIQK Result */ + rtw_dbg(rtwdev, RTW_DBG_RFK, "========Path_A =======\n"); + + if (tx0_finish) + rtw8812a_iqk_tx_fill(rtwdev, RF_PATH_A, tx_x0, tx_y0); + else + rtw8812a_iqk_tx_fill(rtwdev, RF_PATH_A, 0x200, 0x0); + + if (rx0_finish) + rtw8812a_iqk_rx_fill(rtwdev, RF_PATH_A, rx_x0, rx_y0); + else + rtw8812a_iqk_rx_fill(rtwdev, RF_PATH_A, 0x200, 0x0); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "========Path_B =======\n"); + + if (tx1_finish) + rtw8812a_iqk_tx_fill(rtwdev, RF_PATH_B, tx_x1, tx_y1); + else + rtw8812a_iqk_tx_fill(rtwdev, RF_PATH_B, 0x200, 0x0); + + if (rx1_finish) + rtw8812a_iqk_rx_fill(rtwdev, RF_PATH_B, rx_x1, rx_y1); + else + rtw8812a_iqk_rx_fill(rtwdev, RF_PATH_B, 0x200, 0x0); +} + +#define MACBB_REG_NUM_8812A 9 +#define AFE_REG_NUM_8812A 12 +#define RF_REG_NUM_8812A 3 + +static void rtw8812a_do_iqk(struct rtw_dev *rtwdev) +{ + static const u32 backup_macbb_reg[MACBB_REG_NUM_8812A] = { + 0x520, 0x550, 0x808, 0xa04, 0x90c, 0xc00, 0xe00, 0x838, 0x82c + }; + static const u32 backup_afe_reg[AFE_REG_NUM_8812A] = { + 0xc5c, 0xc60, 0xc64, 0xc68, 0xcb0, 0xcb4, + 0xe5c, 0xe60, 0xe64, 0xe68, 0xeb0, 0xeb4 + }; + static const u32 backup_rf_reg[RF_REG_NUM_8812A] = { + 0x65, 0x8f, 0x0 + }; + u32 macbb_backup[MACBB_REG_NUM_8812A] = {}; + u32 afe_backup[AFE_REG_NUM_8812A] = {}; + u32 rfa_backup[RF_REG_NUM_8812A] = {}; + u32 rfb_backup[RF_REG_NUM_8812A] = {}; + u32 reg_cb8, reg_eb8; + + rtw88xxa_iqk_backup_mac_bb(rtwdev, macbb_backup, + backup_macbb_reg, MACBB_REG_NUM_8812A); + + rtw_write32_set(rtwdev, REG_CCASEL, BIT(31)); + reg_cb8 = rtw_read32(rtwdev, REG_RFECTL_A); + reg_eb8 = rtw_read32(rtwdev, REG_RFECTL_B); + rtw_write32_clr(rtwdev, REG_CCASEL, BIT(31)); + + rtw88xxa_iqk_backup_afe(rtwdev, afe_backup, + backup_afe_reg, AFE_REG_NUM_8812A); + rtw8812a_iqk_backup_rf(rtwdev, rfa_backup, rfb_backup, + backup_rf_reg, RF_REG_NUM_8812A); + + rtw88xxa_iqk_configure_mac(rtwdev); + + rtw8812a_iqk(rtwdev); + + rtw8812a_iqk_restore_rf(rtwdev, RF_PATH_A, backup_rf_reg, + rfa_backup, RF_REG_NUM_8812A); + rtw8812a_iqk_restore_rf(rtwdev, RF_PATH_B, backup_rf_reg, + rfb_backup, RF_REG_NUM_8812A); + + rtw8812a_iqk_restore_afe(rtwdev, afe_backup, + backup_afe_reg, AFE_REG_NUM_8812A); + + rtw_write32_set(rtwdev, REG_CCASEL, BIT(31)); + rtw_write32(rtwdev, REG_RFECTL_A, reg_cb8); + rtw_write32(rtwdev, REG_RFECTL_B, reg_eb8); + rtw_write32_clr(rtwdev, REG_CCASEL, BIT(31)); + + rtw88xxa_iqk_restore_mac_bb(rtwdev, macbb_backup, + backup_macbb_reg, MACBB_REG_NUM_8812A); +} + +static void rtw8812a_phy_calibration(struct rtw_dev *rtwdev) +{ + u8 channel = rtwdev->hal.current_channel; + + rtw8812a_do_iqk(rtwdev); + + /* The official driver wants to do this after connecting + * but before first writing 
a new igi (phydm_get_new_igi). + * Here seems close enough. + */ + if (channel >= 36 && channel <= 64) + rtw_load_table(rtwdev, &rtw8812a_agc_diff_lb_tbl); + else if (channel >= 100) + rtw_load_table(rtwdev, &rtw8812a_agc_diff_hb_tbl); +} + +static void rtw8812a_pwr_track(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + if (!dm_info->pwr_trk_triggered) { + rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, + GENMASK(17, 16), 0x03); + dm_info->pwr_trk_triggered = true; + return; + } + + rtw88xxa_phy_pwrtrack(rtwdev, rtw8812a_do_lck, rtw8812a_do_iqk); + dm_info->pwr_trk_triggered = false; +} + +static void rtw8812a_fill_txdesc_checksum(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + u8 *txdesc) +{ + fill_txdesc_checksum_common(txdesc, 16); +} + +static void rtw8812a_coex_cfg_init(struct rtw_dev *rtwdev) +{ +} + +static void rtw8812a_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) +{ +} + +static void rtw8821a_coex_cfg_rfe_type(struct rtw_dev *rtwdev) +{ +} + +static void rtw8821a_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ +} + +static void rtw8821a_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{ +} + +static const struct rtw_chip_ops rtw8812a_ops = { + .power_on = rtw88xxa_power_on, + .power_off = rtw8812a_power_off, + .phy_set_param = NULL, + .read_efuse = rtw88xxa_read_efuse, + .query_phy_status = rtw8812a_query_phy_status, + .set_channel = rtw88xxa_set_channel, + .mac_init = NULL, + .read_rf = rtw88xxa_phy_read_rf, + .write_rf = rtw_phy_write_rf_reg_sipi, + .set_antenna = NULL, + .set_tx_power_index = rtw88xxa_set_tx_power_index, + .cfg_ldo25 = rtw8812a_cfg_ldo25, + .efuse_grant = rtw88xxa_efuse_grant, + .false_alarm_statistics = rtw88xxa_false_alarm_statistics, + .phy_calibration = rtw8812a_phy_calibration, + .cck_pd_set = rtw88xxa_phy_cck_pd_set, + .pwr_track = rtw8812a_pwr_track, + .config_bfee = NULL, + .set_gid_table = NULL, + .cfg_csi_rate = NULL, + .fill_txdesc_checksum = rtw8812a_fill_txdesc_checksum, + .coex_set_init = rtw8812a_coex_cfg_init, + .coex_set_ant_switch = NULL, + .coex_set_gnt_fix = rtw8812a_coex_cfg_gnt_fix, + .coex_set_gnt_debug = NULL, + .coex_set_rfe_type = rtw8821a_coex_cfg_rfe_type, + .coex_set_wl_tx_power = rtw8821a_coex_cfg_wl_tx_power, + .coex_set_wl_rx_gain = rtw8821a_coex_cfg_wl_rx_gain, +}; + +static const struct rtw_page_table page_table_8812a[] = { + /* hq_num, nq_num, lq_num, exq_num, gapq_num */ + {0, 0, 0, 0, 0}, /* SDIO */ + {0, 0, 0, 0, 0}, /* PCI */ + {16, 0, 0, 0, 1}, /* 2 bulk out endpoints */ + {16, 0, 16, 0, 1}, /* 3 bulk out endpoints */ + {16, 0, 16, 0, 1}, /* 4 bulk out endpoints */ +}; + +static const struct rtw_rqpn rqpn_table_8812a[] = { + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, + + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, + + {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH, + RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + + {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, +}; + +static const struct rtw_prioq_addrs prioq_addrs_8812a = { + .prio[RTW_DMA_MAPPING_EXTRA] = { + .rsvd = REG_RQPN_NPQ + 2, .avail 
= REG_RQPN_NPQ + 3, + }, + .prio[RTW_DMA_MAPPING_LOW] = { + .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1, + }, + .prio[RTW_DMA_MAPPING_NORMAL] = { + .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1, + }, + .prio[RTW_DMA_MAPPING_HIGH] = { + .rsvd = REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2, + }, + .wsize = false, +}; + +static const struct rtw_hw_reg rtw8812a_dig[] = { + [0] = { .addr = REG_RXIGI_A, .mask = 0x7f }, + [1] = { .addr = REG_RXIGI_B, .mask = 0x7f }, +}; + +static const struct rtw_rfe_def rtw8812a_rfe_defs[] = { + [0] = { .phy_pg_tbl = &rtw8812a_bb_pg_tbl, + .txpwr_lmt_tbl = &rtw8812a_txpwr_lmt_tbl, + .pwr_track_tbl = &rtw8812a_rtw_pwr_track_tbl, }, + [1] = { .phy_pg_tbl = &rtw8812a_bb_pg_tbl, + .txpwr_lmt_tbl = &rtw8812a_txpwr_lmt_tbl, + .pwr_track_tbl = &rtw8812a_rtw_pwr_track_tbl, }, + [3] = { .phy_pg_tbl = &rtw8812a_bb_pg_rfe3_tbl, + .txpwr_lmt_tbl = &rtw8812a_txpwr_lmt_tbl, + .pwr_track_tbl = &rtw8812a_rtw_pwr_track_rfe3_tbl, }, +}; + +static const u8 wl_rssi_step_8812a[] = {101, 45, 101, 40}; +static const u8 bt_rssi_step_8812a[] = {101, 101, 101, 101}; + +static const struct coex_rf_para rf_para_tx_8812a[] = { + {0, 0, false, 7}, /* for normal */ + {0, 20, false, 7}, /* for WL-CPT */ + {8, 17, true, 4}, + {7, 18, true, 4}, + {6, 19, true, 4}, + {5, 20, true, 4} +}; + +static const struct coex_rf_para rf_para_rx_8812a[] = { + {0, 0, false, 7}, /* for normal */ + {0, 20, false, 7}, /* for WL-CPT */ + {3, 24, true, 5}, + {2, 26, true, 5}, + {1, 27, true, 5}, + {0, 28, true, 5} +}; + +static_assert(ARRAY_SIZE(rf_para_tx_8812a) == ARRAY_SIZE(rf_para_rx_8812a)); + +const struct rtw_chip_info rtw8812a_hw_spec = { + .ops = &rtw8812a_ops, + .id = RTW_CHIP_TYPE_8812A, + .fw_name = "rtw88/rtw8812a_fw.bin", + .wlan_cpu = RTW_WCPU_11N, + .tx_pkt_desc_sz = 40, + .tx_buf_desc_sz = 16, + .rx_pkt_desc_sz = 24, + .rx_buf_desc_sz = 8, + .phy_efuse_size = 512, + .log_efuse_size = 512, + .ptct_efuse_size = 96 + 1, /* TODO or just 18? 
*/ + .txff_size = 131072, + .rxff_size = 16128, + .rsvd_drv_pg_num = 9, + .txgi_factor = 1, + .is_pwr_by_rate_dec = true, + .max_power_index = 0x3f, + .csi_buf_pg_num = 0, + .band = RTW_BAND_2G | RTW_BAND_5G, + .page_size = 512, + .dig_min = 0x20, + .ht_supported = true, + .vht_supported = true, + .lps_deep_mode_supported = 0, + .sys_func_en = 0xFD, + .pwr_on_seq = card_enable_flow_8812a, + .pwr_off_seq = card_disable_flow_8812a, + .page_table = page_table_8812a, + .rqpn_table = rqpn_table_8812a, + .prioq_addrs = &prioq_addrs_8812a, + .intf_table = NULL, + .dig = rtw8812a_dig, + .rf_sipi_addr = {REG_LSSI_WRITE_A, REG_LSSI_WRITE_B}, + .ltecoex_addr = NULL, + .mac_tbl = &rtw8812a_mac_tbl, + .agc_tbl = &rtw8812a_agc_tbl, + .bb_tbl = &rtw8812a_bb_tbl, + .rf_tbl = {&rtw8812a_rf_a_tbl, &rtw8812a_rf_b_tbl}, + .rfe_defs = rtw8812a_rfe_defs, + .rfe_defs_size = ARRAY_SIZE(rtw8812a_rfe_defs), + .rx_ldpc = false, + .hw_feature_report = false, + .c2h_ra_report_size = 4, + .old_datarate_fb_limit = true, + .usb_tx_agg_desc_num = 1, + .iqk_threshold = 8, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, + .max_scan_ie_len = IEEE80211_MAX_DATA_LEN, + + .coex_para_ver = 0, /* no coex code in 8812au driver */ + .bt_desired_ver = 0, + .scbd_support = false, + .new_scbd10_def = false, + .ble_hid_profile_support = false, + .wl_mimo_ps_support = false, + .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, + .bt_rssi_type = COEX_BTRSSI_RATIO, + .ant_isolation = 15, + .rssi_tolerance = 2, + .wl_rssi_step = wl_rssi_step_8812a, + .bt_rssi_step = bt_rssi_step_8812a, + .table_sant_num = 0, + .table_sant = NULL, + .table_nsant_num = 0, + .table_nsant = NULL, + .tdma_sant_num = 0, + .tdma_sant = NULL, + .tdma_nsant_num = 0, + .tdma_nsant = NULL, + .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8812a), + .wl_rf_para_tx = rf_para_tx_8812a, + .wl_rf_para_rx = rf_para_rx_8812a, + .bt_afh_span_bw20 = 0x20, + .bt_afh_span_bw40 = 0x30, + .afh_5g_num = 0, + .afh_5g = NULL, + .coex_info_hw_regs_num = 0, + .coex_info_hw_regs = NULL, +}; +EXPORT_SYMBOL(rtw8812a_hw_spec); + +MODULE_FIRMWARE("rtw88/rtw8812a_fw.bin"); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8812a driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a.h b/drivers/net/wireless/realtek/rtw88/rtw8812a.h new file mode 100644 index 000000000000..82dab59e341d --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8812a.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW8812A_H__ +#define __RTW8812A_H__ + +extern const struct rtw_chip_info rtw8812a_hw_spec; + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a_table.c b/drivers/net/wireless/realtek/rtw88/rtw8812a_table.c new file mode 100644 index 000000000000..048efbbd49ed --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8812a_table.c @@ -0,0 +1,2812 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "main.h" +#include "phy.h" +#include "rtw8812a_table.h" + +static const u32 rtw8812a_mac[] = { + 0x010, 0x0000000C, + 0x80000200, 0x00000000, 0x40000000, 0x00000000, + 0x011, 0x00000066, + 0xA0000000, 0x00000000, + 0x011, 0x0000005A, + 0xB0000000, 0x00000000, + 0x025, 0x0000000F, + 0x072, 0x00000000, + 0x420, 0x00000080, + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000002, + 0x435, 
0x00000003, + 0x436, 0x00000005, + 0x437, 0x00000007, + 0x438, 0x00000000, + 0x439, 0x00000000, + 0x43A, 0x00000000, + 0x43B, 0x00000001, + 0x43C, 0x00000002, + 0x43D, 0x00000003, + 0x43E, 0x00000005, + 0x43F, 0x00000007, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x45B, 0x00000080, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x000000FF, + 0x4C9, 0x00000008, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x604, 0x00000009, + 0x605, 0x00000030, + 0x607, 0x00000003, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000080, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x718, 0x00000040, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8812a_mac, rtw_phy_cfg_mac); + +static const u32 rtw8812a_agc[] = { + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFC000001, + 0x81C, 0xFB020001, + 0x81C, 0xFA040001, + 0x81C, 0xF9060001, + 0x81C, 0xF8080001, + 0x81C, 0xF70A0001, + 0x81C, 0xF60C0001, + 0x81C, 0xF50E0001, + 0x81C, 0xF4100001, + 0x81C, 0xF3120001, + 0x81C, 0xF2140001, + 0x81C, 0xF1160001, + 0x81C, 0xF0180001, + 0x81C, 0xEF1A0001, + 0x81C, 0xEE1C0001, + 0x81C, 0xED1E0001, + 0x81C, 0xEC200001, + 0x81C, 0xEB220001, + 0x81C, 0xEA240001, + 0x81C, 0xCD260001, + 0x81C, 0xCC280001, + 0x81C, 0xCB2A0001, + 0x81C, 0xCA2C0001, + 0x81C, 0xC92E0001, + 0x81C, 0xC8300001, + 0x81C, 0xA6320001, + 0x81C, 0xA5340001, + 0x81C, 0xA4360001, + 0x81C, 0xA3380001, + 0x81C, 0xA23A0001, + 0x81C, 0x883C0001, + 0x81C, 0x873E0001, + 0x81C, 0x86400001, + 0x81C, 0x85420001, + 0x81C, 0x84440001, + 0x81C, 0x83460001, + 0x81C, 0x82480001, + 0x81C, 0x814A0001, + 0x81C, 0x484C0001, + 0x81C, 0x474E0001, + 0x81C, 0x46500001, + 0x81C, 0x45520001, + 0x81C, 0x44540001, + 0x81C, 0x43560001, + 0x81C, 0x42580001, + 0x81C, 0x415A0001, + 0x81C, 0x255C0001, + 0x81C, 0x245E0001, + 0x81C, 0x23600001, + 0x81C, 0x22620001, + 0x81C, 0x21640001, + 0x81C, 0x21660001, + 0x81C, 0x21680001, + 0x81C, 0x216A0001, + 0x81C, 0x216C0001, + 0x81C, 0x216E0001, + 0x81C, 0x21700001, + 0x81C, 0x21720001, + 0x81C, 0x21740001, + 0x81C, 0x21760001, + 0x81C, 0x21780001, + 0x81C, 0x217A0001, + 0x81C, 0x217C0001, + 0x81C, 0x217E0001, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, + 0x81C, 0xF9000001, + 
0x81C, 0xF8020001, + 0x81C, 0xF7040001, + 0x81C, 0xF6060001, + 0x81C, 0xF5080001, + 0x81C, 0xF40A0001, + 0x81C, 0xF30C0001, + 0x81C, 0xF20E0001, + 0x81C, 0xF1100001, + 0x81C, 0xF0120001, + 0x81C, 0xEF140001, + 0x81C, 0xEE160001, + 0x81C, 0xED180001, + 0x81C, 0xEC1A0001, + 0x81C, 0xEB1C0001, + 0x81C, 0xEA1E0001, + 0x81C, 0xCD200001, + 0x81C, 0xCC220001, + 0x81C, 0xCB240001, + 0x81C, 0xCA260001, + 0x81C, 0xC9280001, + 0x81C, 0xC82A0001, + 0x81C, 0xC72C0001, + 0x81C, 0xC62E0001, + 0x81C, 0xA5300001, + 0x81C, 0xA4320001, + 0x81C, 0xA3340001, + 0x81C, 0xA2360001, + 0x81C, 0x88380001, + 0x81C, 0x873A0001, + 0x81C, 0x863C0001, + 0x81C, 0x853E0001, + 0x81C, 0x84400001, + 0x81C, 0x83420001, + 0x81C, 0x82440001, + 0x81C, 0x81460001, + 0x81C, 0x48480001, + 0x81C, 0x474A0001, + 0x81C, 0x464C0001, + 0x81C, 0x454E0001, + 0x81C, 0x44500001, + 0x81C, 0x43520001, + 0x81C, 0x42540001, + 0x81C, 0x41560001, + 0x81C, 0x25580001, + 0x81C, 0x245A0001, + 0x81C, 0x235C0001, + 0x81C, 0x225E0001, + 0x81C, 0x21600001, + 0x81C, 0x21620001, + 0x81C, 0x21640001, + 0x81C, 0x21660001, + 0x81C, 0x21680001, + 0x81C, 0x216A0001, + 0x81C, 0x236C0001, + 0x81C, 0x226E0001, + 0x81C, 0x21700001, + 0x81C, 0x21720001, + 0x81C, 0x21740001, + 0x81C, 0x21760001, + 0x81C, 0x21780001, + 0x81C, 0x217A0001, + 0x81C, 0x217C0001, + 0x81C, 0x217E0001, + 0xA0000000, 0x00000000, + 0x81C, 0xFF000001, + 0x81C, 0xFF020001, + 0x81C, 0xFF040001, + 0x81C, 0xFF060001, + 0x81C, 0xFF080001, + 0x81C, 0xFE0A0001, + 0x81C, 0xFD0C0001, + 0x81C, 0xFC0E0001, + 0x81C, 0xFB100001, + 0x81C, 0xFA120001, + 0x81C, 0xF9140001, + 0x81C, 0xF8160001, + 0x81C, 0xF7180001, + 0x81C, 0xF61A0001, + 0x81C, 0xF51C0001, + 0x81C, 0xF41E0001, + 0x81C, 0xF3200001, + 0x81C, 0xF2220001, + 0x81C, 0xF1240001, + 0x81C, 0xF0260001, + 0x81C, 0xEF280001, + 0x81C, 0xEE2A0001, + 0x81C, 0xED2C0001, + 0x81C, 0xEC2E0001, + 0x81C, 0xEB300001, + 0x81C, 0xEA320001, + 0x81C, 0xE9340001, + 0x81C, 0xE8360001, + 0x81C, 0xE7380001, + 0x81C, 0xE63A0001, + 0x81C, 0xE53C0001, + 0x81C, 0xC73E0001, + 0x81C, 0xC6400001, + 0x81C, 0xC5420001, + 0x81C, 0xC4440001, + 0x81C, 0xC3460001, + 0x81C, 0xC2480001, + 0x81C, 0xC14A0001, + 0x81C, 0xA74C0001, + 0x81C, 0xA64E0001, + 0x81C, 0xA5500001, + 0x81C, 0xA4520001, + 0x81C, 0xA3540001, + 0x81C, 0xA2560001, + 0x81C, 0xA1580001, + 0x81C, 0x675A0001, + 0x81C, 0x665C0001, + 0x81C, 0x655E0001, + 0x81C, 0x64600001, + 0x81C, 0x63620001, + 0x81C, 0x48640001, + 0x81C, 0x47660001, + 0x81C, 0x46680001, + 0x81C, 0x456A0001, + 0x81C, 0x446C0001, + 0x81C, 0x436E0001, + 0x81C, 0x42700001, + 0x81C, 0x41720001, + 0x81C, 0x41740001, + 0x81C, 0x41760001, + 0x81C, 0x41780001, + 0x81C, 0x417A0001, + 0x81C, 0x417C0001, + 0x81C, 0x417E0001, + 0xB0000000, 0x00000000, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFC800001, + 0x81C, 0xFB820001, + 0x81C, 0xFA840001, + 0x81C, 0xF9860001, + 0x81C, 0xF8880001, + 0x81C, 0xF78A0001, + 0x81C, 0xF68C0001, + 0x81C, 0xF58E0001, + 0x81C, 0xF4900001, + 0x81C, 0xF3920001, + 0x81C, 0xF2940001, + 0x81C, 0xF1960001, + 0x81C, 0xF0980001, + 0x81C, 0xEF9A0001, + 0x81C, 0xEE9C0001, + 0x81C, 0xED9E0001, + 0x81C, 0xECA00001, + 0x81C, 0xEBA20001, + 0x81C, 0xEAA40001, + 0x81C, 0xE9A60001, + 0x81C, 0xE8A80001, + 0x81C, 0xE7AA0001, + 0x81C, 0xE6AC0001, + 0x81C, 0xE5AE0001, + 0x81C, 0xE4B00001, + 0x81C, 0xE3B20001, + 0x81C, 0xA8B40001, + 0x81C, 0xA7B60001, + 0x81C, 0xA6B80001, + 0x81C, 0xA5BA0001, + 0x81C, 0xA4BC0001, + 0x81C, 0xA3BE0001, + 0x81C, 0xA2C00001, + 0x81C, 0xA1C20001, + 0x81C, 0x68C40001, + 0x81C, 0x67C60001, + 0x81C, 0x66C80001, + 0x81C, 
0x65CA0001, + 0x81C, 0x64CC0001, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0x81C, 0x01E80001, + 0x81C, 0x01EA0001, + 0x81C, 0x01EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xA0000000, 0x00000000, + 0x81C, 0xFF800001, + 0x81C, 0xFF820001, + 0x81C, 0xFF840001, + 0x81C, 0xFE860001, + 0x81C, 0xFD880001, + 0x81C, 0xFC8A0001, + 0x81C, 0xFB8C0001, + 0x81C, 0xFA8E0001, + 0x81C, 0xF9900001, + 0x81C, 0xF8920001, + 0x81C, 0xF7940001, + 0x81C, 0xF6960001, + 0x81C, 0xF5980001, + 0x81C, 0xF49A0001, + 0x81C, 0xF39C0001, + 0x81C, 0xF29E0001, + 0x81C, 0xF1A00001, + 0x81C, 0xF0A20001, + 0x81C, 0xEFA40001, + 0x81C, 0xEEA60001, + 0x81C, 0xEDA80001, + 0x81C, 0xECAA0001, + 0x81C, 0xEBAC0001, + 0x81C, 0xEAAE0001, + 0x81C, 0xE9B00001, + 0x81C, 0xE8B20001, + 0x81C, 0xE7B40001, + 0x81C, 0xE6B60001, + 0x81C, 0xE5B80001, + 0x81C, 0xE4BA0001, + 0x81C, 0xE3BC0001, + 0x81C, 0xA8BE0001, + 0x81C, 0xA7C00001, + 0x81C, 0xA6C20001, + 0x81C, 0xA5C40001, + 0x81C, 0xA4C60001, + 0x81C, 0xA3C80001, + 0x81C, 0xA2CA0001, + 0x81C, 0xA1CC0001, + 0x81C, 0x68CE0001, + 0x81C, 0x67D00001, + 0x81C, 0x66D20001, + 0x81C, 0x65D40001, + 0x81C, 0x64D60001, + 0x81C, 0x47D80001, + 0x81C, 0x46DA0001, + 0x81C, 0x45DC0001, + 0x81C, 0x44DE0001, + 0x81C, 0x43E00001, + 0x81C, 0x42E20001, + 0x81C, 0x08E40001, + 0x81C, 0x07E60001, + 0x81C, 0x06E80001, + 0x81C, 0x05EA0001, + 0x81C, 0x04EC0001, + 0x81C, 0x03EE0001, + 0x81C, 0x02F00001, + 0x81C, 0x01F20001, + 0x81C, 0x01F40001, + 0x81C, 0x01F60001, + 0x81C, 0x01F80001, + 0x81C, 0x01FA0001, + 0x81C, 0x01FC0001, + 0x81C, 0x01FE0001, + 0xB0000000, 0x00000000, + 0xC50, 0x00000022, + 0xC50, 0x00000020, + 0xE50, 0x00000022, + 0xE50, 0x00000020, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8812a_agc, rtw_phy_cfg_agc); + +static const u32 rtw8812a_agc_diff_lb[] = { + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0x47CE0001, + 0x81C, 0x46D00001, + 0x81C, 0x45D20001, + 0x81C, 0x44D40001, + 0x81C, 0x43D60001, + 0x81C, 0x42D80001, + 0x81C, 0x08DA0001, + 0x81C, 0x07DC0001, + 0x81C, 0x06DE0001, + 0x81C, 0x05E00001, + 0x81C, 0x04E20001, + 0x81C, 0x03E40001, + 0x81C, 0x02E60001, + 0xA0000000, 0x00000000, + 0x81C, 0x47D80001, + 0x81C, 0x46DA0001, + 0x81C, 0x45DC0001, + 0x81C, 0x44DE0001, + 0x81C, 0x43E00001, + 0x81C, 0x42E20001, + 0x81C, 0x08E40001, + 0x81C, 0x07E60001, + 0x81C, 0x06E80001, + 0x81C, 0x05EA0001, + 0x81C, 0x04EC0001, + 0x81C, 0x03EE0001, + 0x81C, 0x02F00001, + 0xB0000000, 0x00000000, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8812a_agc_diff_lb, rtw_phy_cfg_agc); + +static const u32 rtw8812a_agc_diff_hb[] = { + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0x45CE0001, + 0x81C, 0x44D00001, + 0x81C, 0x43D20001, + 0x81C, 0x42D40001, + 0x81C, 0x08D60001, + 0x81C, 0x07D80001, + 0x81C, 0x06DA0001, + 0x81C, 0x05DC0001, + 0x81C, 0x04DE0001, + 0x81C, 0x03E00001, + 0x81C, 0x02E20001, + 0x81C, 0x01E40001, + 0x81C, 0x01E60001, + 0xA0000000, 0x00000000, + 0x81C, 0x45D80001, + 0x81C, 0x44DA0001, + 0x81C, 0x43DC0001, + 0x81C, 0x42DE0001, + 0x81C, 0x08E00001, + 0x81C, 0x07E20001, + 0x81C, 0x06E40001, + 0x81C, 0x05E60001, + 0x81C, 0x04E80001, + 0x81C, 0x03EA0001, + 0x81C, 0x02EC0001, + 0x81C, 0x01EE0001, + 0x81C, 0x01F00001, + 0xB0000000, 
0x00000000, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8812a_agc_diff_hb, rtw_phy_cfg_agc); + +static const u32 rtw8812a_bb[] = { + 0x800, 0x8020D010, + 0x804, 0x080112E0, + 0x808, 0x0E028233, + 0x80C, 0x12131113, + 0x810, 0x20101263, + 0x814, 0x020C3D10, + 0x818, 0x03A00385, + 0x820, 0x00000000, + 0x824, 0x00030FE0, + 0x828, 0x00000000, + 0x82C, 0x002083DD, + 0x830, 0x2EAAEEB8, + 0x834, 0x0037A706, + 0x838, 0x06C89B44, + 0x83C, 0x0000095B, + 0x840, 0xC0000001, + 0x844, 0x40003CDE, + 0x848, 0x6210FF8B, + 0x84C, 0x6CFDFFB8, + 0x850, 0x28874706, + 0x854, 0x0001520C, + 0x858, 0x8060E000, + 0x85C, 0x74210168, + 0x860, 0x6929C321, + 0x864, 0x79727432, + 0x868, 0x8CA7A314, + 0x86C, 0x338C2878, + 0x870, 0x03333333, + 0x874, 0x31602C2E, + 0x878, 0x00003152, + 0x87C, 0x000FC000, + 0x8A0, 0x00000013, + 0x8A4, 0x7F7F7F7F, + 0x8A8, 0xA202033E, + 0x8AC, 0x0FF0FA0A, + 0x8B0, 0x00000600, + 0x8B4, 0x000FC080, + 0x8B8, 0x6C10D7FF, + 0x8BC, 0x4CA520A3, + 0x8C0, 0x27F00020, + 0x8C4, 0x00000000, + 0x8C8, 0x00012D69, + 0x8CC, 0x08248492, + 0x8D0, 0x0000B800, + 0x8DC, 0x00000000, + 0x8D4, 0x940008A0, + 0x8D8, 0x290B5612, + 0x8F8, 0x400002C0, + 0x8FC, 0x00000000, + 0x900, 0x00000701, + 0x90C, 0x00000000, + 0x910, 0x0000FC00, + 0x914, 0x00000404, + 0x918, 0x1C1028C0, + 0x91C, 0x64B11A1C, + 0x920, 0xE0767233, + 0x924, 0x055AA500, + 0x928, 0x00000004, + 0x92C, 0xFFFE0000, + 0x930, 0xFFFFFFFE, + 0x934, 0x001FFFFF, + 0x960, 0x00000000, + 0x964, 0x00000000, + 0x968, 0x00000000, + 0x96C, 0x00000000, + 0x970, 0x801FFFFF, + 0x978, 0x00000000, + 0x97C, 0x00000000, + 0x980, 0x00000000, + 0x984, 0x00000000, + 0x988, 0x00000000, + 0x990, 0x27100000, + 0x994, 0xFFFF0100, + 0x998, 0xFFFFFF5C, + 0x99C, 0xFFFFFFFF, + 0x9A0, 0x000000FF, + 0x9A4, 0x00080080, + 0x9A8, 0x00000000, + 0x9AC, 0x00000000, + 0x9B0, 0x81081008, + 0x9B4, 0x00000000, + 0x9B8, 0x01081008, + 0x9BC, 0x01081008, + 0x9D0, 0x00000000, + 0x9D4, 0x00000000, + 0x9D8, 0x00000000, + 0x9DC, 0x00000000, + 0x9E4, 0x00000003, + 0x9E8, 0x000002D5, + 0xA00, 0x00D047C8, + 0xA04, 0x01FF000C, + 0xA08, 0x8C838300, + 0xA0C, 0x2E7F000F, + 0xA10, 0x9500BB78, + 0xA14, 0x11144028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0000, + 0xA24, 0x090E1217, + 0xA28, 0x00000305, + 0xA2C, 0x00900000, + 0xA70, 0x101FFF00, + 0xA74, 0x00000008, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x218075B2, + 0xA84, 0x001F8C80, + 0xB00, 0x03100000, + 0xB04, 0x0000B000, + 0xB08, 0xAE0201EB, + 0xB0C, 0x01003207, + 0xB10, 0x00009807, + 0xB14, 0x01000000, + 0xB18, 0x00000002, + 0xB1C, 0x00000002, + 0xB20, 0x0000001F, + 0xB24, 0x03020100, + 0xB28, 0x07060504, + 0xB2C, 0x0B0A0908, + 0xB30, 0x0F0E0D0C, + 0xB34, 0x13121110, + 0xB38, 0x17161514, + 0xB3C, 0x0000003A, + 0xB40, 0x00000000, + 0xB44, 0x00000000, + 0xB48, 0x13000032, + 0xB4C, 0x48080000, + 0xB50, 0x00000000, + 0xB54, 0x00000000, + 0xB58, 0x00000000, + 0xB5C, 0x00000000, + 0xC00, 0x00000007, + 0xC04, 0x00042020, + 0xC08, 0x80410231, + 0xC0C, 0x00000000, + 0xC10, 0x00000100, + 0xC14, 0x01000000, + 0xC1C, 0x40000003, + 0xC20, 0x12121212, + 0xC24, 0x12121212, + 0xC28, 0x12121212, + 0xC2C, 0x12121212, + 0xC30, 0x12121212, + 0xC34, 0x12121212, + 0xC38, 0x12121212, + 0xC3C, 0x12121212, + 0xC40, 0x12121212, + 0xC44, 0x12121212, + 0xC48, 0x12121212, + 0xC4C, 0x12121212, + 0xC50, 0x00000020, + 0xC54, 0x0008121C, + 0xC58, 0x30000C1C, + 0xC5C, 0x00000058, + 0xC60, 0x34344443, + 0xC64, 0x07003333, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0x90000002, 
0x00000000, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0x90000001, 0x00000000, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, + 0xC68, 0x59791979, + 0xA0000000, 0x00000000, + 0xC68, 0x59799979, + 0xB0000000, 0x00000000, + 0xC6C, 0x59795979, + 0xC70, 0x19795979, + 0xC74, 0x19795979, + 0xC78, 0x19791979, + 0xC7C, 0x19791979, + 0xC80, 0x19791979, + 0xC84, 0x19791979, + 0xC94, 0x0100005C, + 0xC98, 0x00000000, + 0xC9C, 0x00000000, + 0xCA0, 0x00000029, + 0xCA4, 0x08040201, + 0xCA8, 0x80402010, + 0xCB0, 0x77547777, + 0xCB4, 0x00000077, + 0xCB8, 0x00508242, + 0xE00, 0x00000007, + 0xE04, 0x00042020, + 0xE08, 0x80410231, + 0xE0C, 0x00000000, + 0xE10, 0x00000100, + 0xE14, 0x01000000, + 0xE1C, 0x40000003, + 0xE20, 0x12121212, + 0xE24, 0x12121212, + 0xE28, 0x12121212, + 0xE2C, 0x12121212, + 0xE30, 0x12121212, + 0xE34, 0x12121212, + 0xE38, 0x12121212, + 0xE3C, 0x12121212, + 0xE40, 0x12121212, + 0xE44, 0x12121212, + 0xE48, 0x12121212, + 0xE4C, 0x12121212, + 0xE50, 0x00000020, + 0xE54, 0x0008121C, + 0xE58, 0x30000C1C, + 0xE5C, 0x00000058, + 0xE60, 0x34344443, + 0xE64, 0x07003333, + 0xE68, 0x59791979, + 0xE6C, 0x59795979, + 0xE70, 0x19795979, + 0xE74, 0x19795979, + 0xE78, 0x19791979, + 0xE7C, 0x19791979, + 0xE80, 0x19791979, + 0xE84, 0x19791979, + 0xE94, 0x0100005C, + 0xE98, 0x00000000, + 0xE9C, 0x00000000, + 0xEA0, 0x00000029, + 0xEA4, 0x08040201, + 0xEA8, 0x80402010, + 0xEB0, 0x77547777, + 0xEB4, 0x00000077, + 0xEB8, 0x00508242, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8812a_bb, rtw_phy_cfg_bb); + +static const struct rtw_phy_pg_cfg_pair rtw8812a_bb_pg[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34363840, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x42424444, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323638, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303236, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x38404242, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283034, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303236, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x42422426, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30343840, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34363840, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x42424444, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323638, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303236, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x38404242, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283034, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303236, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x42422426, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30343840, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x42424444, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323640, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x40424444, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303236, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404242, }, + { 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283034, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x40424444, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303236, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x42422426, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x30343840, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x42424444, 
}, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323640, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x40424444, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303236, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404242, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283034, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x40424444, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303236, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x42422426, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x30343840, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8812a_bb_pg); + +static const struct rtw_phy_pg_cfg_pair rtw8812a_bb_pg_rfe3[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x32323232, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303232, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x32323232, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303232, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x32323232, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303232, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x32323232, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x24262830, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x32323232, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303232, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x32323232, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303232, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x32323232, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303232, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x32323232, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x24262830, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x32323232, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x28303232, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32323232, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, }, + { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32323232, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x32322222, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303232, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22222426, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x32323232, }, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x28303232, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32323232, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32323232, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x32322222, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303232, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22222426, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8812a_bb_pg_rfe3); + +static const u32 rtw8812a_rf_a[] = { + 0x000, 0x00010000, + 0x018, 0x0001712A, + 0x056, 0x00051CF2, + 0x066, 0x00040000, + 0x01E, 0x00080000, + 0x089, 0x00000080, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x00014B3A, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, + 0x086, 0x00014B3A, + 0xA0000000, 0x00000000, + 0x086, 0x00014B38, + 0xB0000000, 0x00000000, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x08B, 0x00080180, + 0xA0000000, 
0x00000000, + 0x08B, 0x00087180, + 0xB0000000, 0x00000000, + 0x0B1, 0x0001FC1A, + 0x0B3, 0x000F0810, + 0x0B4, 0x0001A78D, + 0x0BA, 0x00086180, + 0x018, 0x00000006, + 0x0EF, 0x00002000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xA0000000, 0x00000000, + 0x03B, 0x00038A58, + 0x03B, 0x00037A58, + 0x03B, 0x0002A590, + 0x03B, 0x00027A50, + 0x03B, 0x00018248, + 0x03B, 0x00010240, + 0x03B, 0x00008240, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000100, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A4EE, + 0x034, 0x00009076, + 0x034, 0x00008073, + 0x034, 0x00007070, + 0x034, 0x0000606D, + 0x034, 0x0000506A, + 0x034, 0x00004049, + 0x034, 0x00003046, + 0x034, 0x00002028, + 0x034, 0x00001025, + 0x034, 0x00000022, + 0xA0000000, 0x00000000, + 0x034, 0x0000ADF4, + 0x034, 0x00009DF1, + 0x034, 0x00008DEE, + 0x034, 0x00007DEB, + 0x034, 0x00006DE8, + 0x034, 0x00005DE5, + 0x034, 0x00004DE2, + 0x034, 0x00003CE6, + 0x034, 0x000024E7, + 0x034, 0x000014E4, + 0x034, 0x000004E1, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x0EF, 0x00000002, + 0x008, 0x00008400, + 0x018, 0x0001712A, + 0x0EF, 0x00001000, + 0x03A, 0x00000080, + 0x03B, 0x0003A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0003202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0002B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00023070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0001B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00012085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0000A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00002080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x0007A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0007202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0006B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00063070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0005B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00052085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0004A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00042080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x000BA02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x000B202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x000AB064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x000A3070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0009B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00092085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0008A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00082080, + 0x03C, 0x00010000, + 0x0EF, 0x00001100, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, 
+ 0x034, 0x00040004, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x0004604D, + 0x034, 0x0004504A, + 0x034, 0x00044047, + 0x034, 0x00043044, + 0x034, 0x00042007, + 0x034, 0x00041004, + 0x034, 0x00040001, + 0xA0000000, 0x00000000, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x034, 0x00048DEF, + 0x034, 0x00047DEC, + 0x034, 0x00046DE9, + 0x034, 0x00045DE6, + 0x034, 0x00044DE3, + 0x034, 0x000438C8, + 0x034, 0x000428C5, + 0x034, 0x000418C2, + 0x034, 0x000408C0, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0B4, + 0x034, 0x000290B1, + 0x034, 0x00028072, + 0x034, 0x0002706F, + 0x034, 0x0002604F, + 0x034, 0x0002504C, + 0x034, 0x00024049, + 0x034, 0x00023046, + 0x034, 0x00022009, + 0x034, 0x00021006, + 0x034, 0x00020003, + 0xA0000000, 0x00000000, + 0x034, 0x0002ADF5, + 0x034, 0x00029DF2, + 0x034, 0x00028DEF, + 0x034, 0x00027DEC, + 0x034, 0x00026DE9, + 0x034, 0x00025DE6, + 0x034, 0x00024DE3, + 0x034, 0x000238C8, + 0x034, 0x000228C5, + 0x034, 0x000218C2, + 0x034, 0x000208C0, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x0000604D, + 0x034, 0x0000504A, + 0x034, 0x00004047, + 0x034, 0x00003044, + 0x034, 0x00002007, + 0x034, 0x00001004, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0000AFF7, + 0x034, 0x00009DF7, + 0x034, 0x00008DF4, + 0x034, 0x00007DF1, + 0x034, 0x00006DEE, + 0x034, 0x00005DEB, + 0x034, 0x00004DE8, + 0x034, 0x000038CC, + 0x034, 0x000028C9, + 0x034, 0x000018C6, + 0x034, 0x000008C3, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001D4, + 0x035, 0x000081D4, + 0x035, 0x000101D4, + 0x035, 0x000201B4, + 0x035, 0x000281B4, + 0x035, 0x000301B4, + 0x035, 0x000401B4, + 0x035, 0x000481B4, + 0x035, 0x000501B4, + 0xA0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x00000188, + 0x035, 0x00008147, + 0x035, 0x00010147, + 0x035, 0x000201D7, + 0x035, 0x000281D7, + 0x035, 0x000301D7, + 0x035, 0x000401D8, + 0x035, 0x000481D8, + 0x035, 0x000501D8, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0x90000008, 0x05000000, 0x40000000, 
0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00004BFB, + 0x036, 0x0000CBFB, + 0x036, 0x00014BFB, + 0x036, 0x0001CBFB, + 0x036, 0x00024F4B, + 0x036, 0x0002CF4B, + 0x036, 0x00034F4B, + 0x036, 0x0003CF4B, + 0x036, 0x00044F4B, + 0x036, 0x0004CF4B, + 0x036, 0x00054F4B, + 0x036, 0x0005CF4B, + 0xA0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00084EB4, + 0x036, 0x0008CC35, + 0x036, 0x00094C35, + 0x036, 0x0009CC35, + 0x036, 0x000A4C35, + 0x036, 0x000ACC35, + 0x036, 0x000B4C35, + 0x036, 0x000BCC35, + 0x036, 0x000C4C34, + 0x036, 0x000CCC35, + 0x036, 0x000D4C35, + 0x036, 0x000DCC35, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x03C, 0x000002CC, + 0x03C, 0x00000522, + 0x03C, 0x00000902, + 0xA0000000, 0x00000000, + 0x03C, 0x000002A8, + 0x03C, 0x000005A2, + 0x03C, 0x00000880, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000002, + 0x0DF, 0x00000080, + 0x01F, 0x00000064, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D1, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x061, 0x000FDD43, + 0x062, 0x00038F4B, + 0x063, 0x00032117, + 0x064, 0x000194AC, + 0x065, 0x000931D2, + 0xA0000000, 0x00000000, + 0x061, 0x000E5D53, + 0x062, 0x00038FCD, + 0x063, 0x000114EB, + 0x064, 0x000196AC, + 0x065, 0x000911D7, + 0xB0000000, 0x00000000, + 0x008, 0x00008400, + 0x01C, 0x000739D2, + 0x0B4, 0x0001E78D, + 0x018, 0x0001F12A, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0x0B4, 0x0001A78D, + 0x018, 0x0001712A, +}; + +RTW_DECL_TABLE_RF_RADIO(rtw8812a_rf_a, A); + +static const u32 rtw8812a_rf_b[] = { + 0x056, 0x00051CF2, + 0x066, 0x00040000, + 0x089, 0x00000080, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x00014B3A, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, + 0x086, 0x00014B3A, + 0xA0000000, 0x00000000, + 0x086, 0x00014B38, + 0xB0000000, 0x00000000, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x08B, 0x00080180, + 0xA0000000, 0x00000000, + 0x08B, 0x00087180, + 0xB0000000, 0x00000000, + 0x018, 0x00000006, + 0x0EF, 0x00002000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0x90000001, 0x00000005, 0x40000000, 0x00000000, + 0x03B, 0x0003F218, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xA0000000, 0x00000000, + 0x03B, 0x00038A58, + 0x03B, 0x00037A58, + 0x03B, 0x0002A590, + 0x03B, 0x00027A50, + 0x03B, 0x00018248, + 0x03B, 0x00010240, + 0x03B, 0x00008240, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000100, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A4EE, + 0x034, 0x00009076, + 0x034, 0x00008073, + 0x034, 0x00007070, + 0x034, 0x0000606D, + 0x034, 0x0000506A, + 0x034, 0x00004049, + 0x034, 0x00003046, + 0x034, 0x00002028, + 0x034, 0x00001025, + 0x034, 0x00000022, + 0xA0000000, 0x00000000, + 0x034, 0x0000ADF4, + 0x034, 0x00009DF1, + 0x034, 0x00008DEE, + 0x034, 0x00007DEB, + 0x034, 0x00006DE8, + 0x034, 0x00005DE5, + 0x034, 0x00004DE2, + 0x034, 0x00003CE6, + 0x034, 0x000024E7, + 0x034, 0x000014E4, + 0x034, 0x000004E1, + 0xB0000000, 0x00000000, + 
0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x0EF, 0x00000002, + 0x008, 0x00008400, + 0x018, 0x0001712A, + 0x0EF, 0x00001000, + 0x03A, 0x00000080, + 0x03B, 0x0003A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0003202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0002B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00023070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0001B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00012085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0000A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00002080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x0007A02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x0007202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x0006B064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x00063070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0005B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00052085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0004A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00042080, + 0x03C, 0x00010000, + 0x03A, 0x00000080, + 0x03B, 0x000BA02C, + 0x03C, 0x00004000, + 0x03A, 0x00000400, + 0x03B, 0x000B202C, + 0x03C, 0x00010000, + 0x03A, 0x000000A0, + 0x03B, 0x000AB064, + 0x03C, 0x00004000, + 0x03A, 0x000000D8, + 0x03B, 0x000A3070, + 0x03C, 0x00004000, + 0x03A, 0x00000468, + 0x03B, 0x0009B870, + 0x03C, 0x00010000, + 0x03A, 0x00000098, + 0x03B, 0x00092085, + 0x03C, 0x000E4000, + 0x03A, 0x00000418, + 0x03B, 0x0008A080, + 0x03C, 0x000F0000, + 0x03A, 0x00000418, + 0x03B, 0x00082080, + 0x03C, 0x00010000, + 0x0EF, 0x00001100, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0B2, + 0x034, 0x000490AF, + 0x034, 0x00048070, + 0x034, 0x0004706D, + 0x034, 0x00046050, + 0x034, 0x0004504D, + 0x034, 0x0004404A, + 0x034, 0x00043047, + 0x034, 0x0004200A, + 0x034, 0x00041007, + 0x034, 0x00040004, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0B1, + 0x034, 0x000490AE, + 0x034, 0x0004806F, + 0x034, 0x0004706C, + 0x034, 0x0004604C, + 0x034, 0x00045049, + 0x034, 0x00044046, + 0x034, 0x00043043, + 0x034, 0x00042006, + 0x034, 0x00041003, + 0x034, 0x00040000, + 0xA0000000, 0x00000000, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x034, 0x00048DEF, + 0x034, 0x00047DEC, + 0x034, 0x00046DE9, + 0x034, 0x00045DE6, + 0x034, 0x00044DE3, + 0x034, 0x000438C8, + 0x034, 0x000428C5, + 0x034, 0x000418C2, + 0x034, 0x000408C0, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0B2, + 0x034, 0x000290AF, + 0x034, 0x00028070, + 0x034, 0x0002706D, + 0x034, 0x00026050, + 0x034, 0x0002504D, + 0x034, 0x0002404A, + 0x034, 0x00023047, + 0x034, 0x0002200A, + 0x034, 0x00021007, + 0x034, 0x00020004, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0B3, + 0x034, 0x000290B0, + 0x034, 0x00028071, + 0x034, 0x0002706E, + 0x034, 0x0002604E, + 0x034, 0x0002504B, + 0x034, 0x00024048, + 0x034, 0x00023045, + 0x034, 0x00022008, + 0x034, 0x00021005, + 0x034, 0x00020002, + 0xA0000000, 0x00000000, + 0x034, 0x0002ADF5, + 0x034, 0x00029DF2, + 0x034, 0x00028DEF, + 0x034, 0x00027DEC, + 0x034, 0x00026DE9, + 0x034, 0x00025DE6, + 0x034, 0x00024DE3, + 0x034, 0x000238C8, + 0x034, 0x000228C5, + 
0x034, 0x000218C2, + 0x034, 0x000208C0, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0B2, + 0x034, 0x000090AF, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x00006050, + 0x034, 0x0000504D, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x0000200A, + 0x034, 0x00001007, + 0x034, 0x00000004, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0B3, + 0x034, 0x000090B0, + 0x034, 0x00008070, + 0x034, 0x0000706D, + 0x034, 0x0000604D, + 0x034, 0x0000504A, + 0x034, 0x00004047, + 0x034, 0x00003044, + 0x034, 0x00002007, + 0x034, 0x00001004, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0000AFF7, + 0x034, 0x00009DF7, + 0x034, 0x00008DF4, + 0x034, 0x00007DF1, + 0x034, 0x00006DEE, + 0x034, 0x00005DEB, + 0x034, 0x00004DE8, + 0x034, 0x000038CC, + 0x034, 0x000028C9, + 0x034, 0x000018C6, + 0x034, 0x000008C3, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x000001C5, + 0x035, 0x000081C5, + 0x035, 0x000101C5, + 0x035, 0x00020174, + 0x035, 0x00028174, + 0x035, 0x00030174, + 0x035, 0x00040185, + 0x035, 0x00048185, + 0x035, 0x00050185, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x035, 0x00000188, + 0x035, 0x00008147, + 0x035, 0x00010147, + 0x035, 0x000201D7, + 0x035, 0x000281D7, + 0x035, 0x000301D7, + 0x035, 0x000401D8, + 0x035, 0x000481D8, + 0x035, 0x000501D8, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00005B8B, + 0x036, 0x0000DB8B, + 0x036, 0x00015B8B, + 0x036, 0x0001DB8B, + 0x036, 0x000262DB, + 0x036, 0x0002E2DB, + 0x036, 0x000362DB, + 0x036, 0x0003E2DB, + 0x036, 0x0004553B, + 0x036, 0x0004D53B, + 0x036, 0x0005553B, + 0x036, 0x0005D53B, + 0xA0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x036, 0x00084EB4, + 0x036, 0x0008CC35, + 0x036, 0x00094C35, + 0x036, 0x0009CC35, + 0x036, 0x000A4C35, + 0x036, 0x000ACC35, + 0x036, 0x000B4C35, + 0x036, 0x000BCC35, + 0x036, 0x000C4C34, + 0x036, 0x000CCC35, + 0x036, 0x000D4C35, + 0x036, 0x000DCC35, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x03C, 0x000002DC, + 0x03C, 0x00000524, + 0x03C, 0x00000902, + 0xA0000000, 0x00000000, + 0x03C, 0x000002A8, + 0x03C, 0x000005A2, + 0x03C, 0x00000880, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000002, + 0x0DF, 0x00000080, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0x90000008, 0x05000000, 0x40000000, 0x00000000, + 0x061, 
0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D2, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EAC43, + 0x062, 0x00038F47, + 0x063, 0x00031157, + 0x064, 0x0001C4AC, + 0x065, 0x000931D1, + 0xA0000000, 0x00000000, + 0x061, 0x000E5D53, + 0x062, 0x00038FCD, + 0x063, 0x000114EB, + 0x064, 0x000196AC, + 0x065, 0x000911D7, + 0xB0000000, 0x00000000, + 0x008, 0x00008400, +}; + +RTW_DECL_TABLE_RF_RADIO(rtw8812a_rf_b, B); + +static const struct rtw_txpwr_lmt_cfg_pair rtw8812a_txpwr_lmt[] = { + { 0, 0, 0, 0, 1, 36, }, + { 2, 0, 0, 0, 1, 32, }, + { 1, 0, 0, 0, 1, 32, }, + { 0, 0, 0, 0, 2, 36, }, + { 2, 0, 0, 0, 2, 32, }, + { 1, 0, 0, 0, 2, 32, }, + { 0, 0, 0, 0, 3, 36, }, + { 2, 0, 0, 0, 3, 32, }, + { 1, 0, 0, 0, 3, 32, }, + { 0, 0, 0, 0, 4, 36, }, + { 2, 0, 0, 0, 4, 32, }, + { 1, 0, 0, 0, 4, 32, }, + { 0, 0, 0, 0, 5, 36, }, + { 2, 0, 0, 0, 5, 32, }, + { 1, 0, 0, 0, 5, 32, }, + { 0, 0, 0, 0, 6, 36, }, + { 2, 0, 0, 0, 6, 32, }, + { 1, 0, 0, 0, 6, 32, }, + { 0, 0, 0, 0, 7, 36, }, + { 2, 0, 0, 0, 7, 32, }, + { 1, 0, 0, 0, 7, 32, }, + { 0, 0, 0, 0, 8, 36, }, + { 2, 0, 0, 0, 8, 32, }, + { 1, 0, 0, 0, 8, 32, }, + { 0, 0, 0, 0, 9, 36, }, + { 2, 0, 0, 0, 9, 32, }, + { 1, 0, 0, 0, 9, 32, }, + { 0, 0, 0, 0, 10, 36, }, + { 2, 0, 0, 0, 10, 32, }, + { 1, 0, 0, 0, 10, 32, }, + { 0, 0, 0, 0, 11, 36, }, + { 2, 0, 0, 0, 11, 32, }, + { 1, 0, 0, 0, 11, 32, }, + { 0, 0, 0, 0, 12, 63, }, + { 2, 0, 0, 0, 12, 32, }, + { 1, 0, 0, 0, 12, 32, }, + { 0, 0, 0, 0, 13, 63, }, + { 2, 0, 0, 0, 13, 32, }, + { 1, 0, 0, 0, 13, 32, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 32, }, + { 0, 0, 0, 1, 1, 34, }, + { 2, 0, 0, 1, 1, 32, }, + { 1, 0, 0, 1, 1, 32, }, + { 0, 0, 0, 1, 2, 36, }, + { 2, 0, 0, 1, 2, 32, }, + { 1, 0, 0, 1, 2, 32, }, + { 0, 0, 0, 1, 3, 36, }, + { 2, 0, 0, 1, 3, 32, }, + { 1, 0, 0, 1, 3, 32, }, + { 0, 0, 0, 1, 4, 36, }, + { 2, 0, 0, 1, 4, 32, }, + { 1, 0, 0, 1, 4, 32, }, + { 0, 0, 0, 1, 5, 36, }, + { 2, 0, 0, 1, 5, 32, }, + { 1, 0, 0, 1, 5, 32, }, + { 0, 0, 0, 1, 6, 36, }, + { 2, 0, 0, 1, 6, 32, }, + { 1, 0, 0, 1, 6, 32, }, + { 0, 0, 0, 1, 7, 36, }, + { 2, 0, 0, 1, 7, 32, }, + { 1, 0, 0, 1, 7, 32, }, + { 0, 0, 0, 1, 8, 36, }, + { 2, 0, 0, 1, 8, 32, }, + { 1, 0, 0, 1, 8, 32, }, + { 0, 0, 0, 1, 9, 36, }, + { 2, 0, 0, 1, 9, 32, }, + { 1, 0, 0, 1, 9, 32, }, + { 0, 0, 0, 1, 10, 36, }, + { 2, 0, 0, 1, 10, 32, }, + { 1, 0, 0, 1, 10, 32, }, + { 0, 0, 0, 1, 11, 32, }, + { 2, 0, 0, 1, 11, 32, }, + { 1, 0, 0, 1, 11, 32, }, + { 0, 0, 0, 1, 12, 63, }, + { 2, 0, 0, 1, 12, 32, }, + { 1, 0, 0, 1, 12, 32, }, + { 0, 0, 0, 1, 13, 63, }, + { 2, 0, 0, 1, 13, 32, }, + { 1, 0, 0, 1, 13, 32, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 34, }, + { 2, 0, 0, 2, 1, 32, }, + { 1, 0, 0, 2, 1, 32, }, + { 0, 0, 0, 2, 2, 36, }, + { 2, 0, 0, 2, 2, 32, }, + { 1, 0, 0, 2, 2, 32, }, + { 0, 0, 0, 2, 3, 36, }, + { 2, 0, 0, 2, 3, 32, }, + { 1, 0, 0, 2, 3, 32, }, + { 0, 0, 0, 2, 4, 36, }, + { 2, 0, 0, 2, 4, 32, }, + { 1, 0, 0, 2, 4, 32, }, + { 0, 0, 0, 2, 5, 36, }, + { 2, 0, 0, 2, 5, 32, }, + { 1, 0, 0, 2, 5, 32, }, + { 0, 0, 0, 2, 6, 36, }, + { 2, 0, 0, 2, 6, 32, }, + { 1, 0, 0, 2, 6, 32, }, + { 0, 0, 0, 2, 7, 36, }, + { 2, 0, 0, 2, 7, 32, }, + { 1, 0, 0, 2, 7, 32, }, + { 0, 0, 0, 2, 8, 36, }, + { 2, 0, 0, 2, 8, 32, }, + { 1, 0, 0, 2, 8, 32, }, + { 0, 0, 0, 2, 9, 36, }, + { 2, 0, 0, 2, 9, 32, }, + { 1, 0, 0, 2, 9, 32, }, + { 0, 0, 0, 2, 10, 36, }, + { 2, 0, 0, 2, 10, 32, }, + { 1, 0, 0, 2, 10, 32, }, + { 0, 
0, 0, 2, 11, 32, }, + { 2, 0, 0, 2, 11, 32, }, + { 1, 0, 0, 2, 11, 32, }, + { 0, 0, 0, 2, 12, 63, }, + { 2, 0, 0, 2, 12, 32, }, + { 1, 0, 0, 2, 12, 32, }, + { 0, 0, 0, 2, 13, 63, }, + { 2, 0, 0, 2, 13, 32, }, + { 1, 0, 0, 2, 13, 32, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 32, }, + { 2, 0, 0, 3, 1, 32, }, + { 1, 0, 0, 3, 1, 32, }, + { 0, 0, 0, 3, 2, 34, }, + { 2, 0, 0, 3, 2, 32, }, + { 1, 0, 0, 3, 2, 32, }, + { 0, 0, 0, 3, 3, 34, }, + { 2, 0, 0, 3, 3, 32, }, + { 1, 0, 0, 3, 3, 32, }, + { 0, 0, 0, 3, 4, 34, }, + { 2, 0, 0, 3, 4, 32, }, + { 1, 0, 0, 3, 4, 32, }, + { 0, 0, 0, 3, 5, 34, }, + { 2, 0, 0, 3, 5, 32, }, + { 1, 0, 0, 3, 5, 32, }, + { 0, 0, 0, 3, 6, 34, }, + { 2, 0, 0, 3, 6, 32, }, + { 1, 0, 0, 3, 6, 32, }, + { 0, 0, 0, 3, 7, 34, }, + { 2, 0, 0, 3, 7, 32, }, + { 1, 0, 0, 3, 7, 32, }, + { 0, 0, 0, 3, 8, 34, }, + { 2, 0, 0, 3, 8, 32, }, + { 1, 0, 0, 3, 8, 32, }, + { 0, 0, 0, 3, 9, 34, }, + { 2, 0, 0, 3, 9, 32, }, + { 1, 0, 0, 3, 9, 32, }, + { 0, 0, 0, 3, 10, 34, }, + { 2, 0, 0, 3, 10, 32, }, + { 1, 0, 0, 3, 10, 32, }, + { 0, 0, 0, 3, 11, 30, }, + { 2, 0, 0, 3, 11, 32, }, + { 1, 0, 0, 3, 11, 32, }, + { 0, 0, 0, 3, 12, 63, }, + { 2, 0, 0, 3, 12, 32, }, + { 1, 0, 0, 3, 12, 32, }, + { 0, 0, 0, 3, 13, 63, }, + { 2, 0, 0, 3, 13, 32, }, + { 1, 0, 0, 3, 13, 32, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 32, }, + { 2, 0, 1, 2, 3, 32, }, + { 1, 0, 1, 2, 3, 32, }, + { 0, 0, 1, 2, 4, 36, }, + { 2, 0, 1, 2, 4, 32, }, + { 1, 0, 1, 2, 4, 32, }, + { 0, 0, 1, 2, 5, 36, }, + { 2, 0, 1, 2, 5, 32, }, + { 1, 0, 1, 2, 5, 32, }, + { 0, 0, 1, 2, 6, 36, }, + { 2, 0, 1, 2, 6, 32, }, + { 1, 0, 1, 2, 6, 32, }, + { 0, 0, 1, 2, 7, 36, }, + { 2, 0, 1, 2, 7, 32, }, + { 1, 0, 1, 2, 7, 32, }, + { 0, 0, 1, 2, 8, 36, }, + { 2, 0, 1, 2, 8, 32, }, + { 1, 0, 1, 2, 8, 32, }, + { 0, 0, 1, 2, 9, 36, }, + { 2, 0, 1, 2, 9, 32, }, + { 1, 0, 1, 2, 9, 32, }, + { 0, 0, 1, 2, 10, 36, }, + { 2, 0, 1, 2, 10, 32, }, + { 1, 0, 1, 2, 10, 32, }, + { 0, 0, 1, 2, 11, 32, }, + { 2, 0, 1, 2, 11, 32, }, + { 1, 0, 1, 2, 11, 32, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 32, }, + { 1, 0, 1, 2, 12, 32, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 32, }, + { 1, 0, 1, 2, 13, 32, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 30, }, + { 2, 0, 1, 3, 3, 30, }, + { 1, 0, 1, 3, 3, 30, }, + { 0, 0, 1, 3, 4, 34, }, + { 2, 0, 1, 3, 4, 30, }, + { 1, 0, 1, 3, 4, 30, }, + { 0, 0, 1, 3, 5, 34, }, + { 2, 0, 1, 3, 5, 30, }, + { 1, 0, 1, 3, 5, 30, }, + { 0, 0, 1, 3, 6, 34, }, + { 2, 0, 1, 3, 6, 30, }, + { 1, 0, 1, 3, 6, 30, }, + { 0, 0, 1, 3, 7, 34, }, + { 2, 0, 1, 3, 7, 30, }, + { 1, 0, 1, 3, 7, 30, }, + { 0, 0, 1, 3, 8, 34, }, + { 2, 0, 1, 3, 8, 30, }, + { 1, 0, 1, 3, 8, 30, }, + { 0, 0, 1, 3, 9, 34, }, + { 2, 0, 1, 3, 9, 30, }, + { 1, 0, 1, 3, 9, 30, }, + { 0, 0, 1, 3, 10, 34, }, + { 2, 0, 1, 3, 10, 30, }, + { 1, 0, 1, 3, 10, 30, }, + { 0, 0, 1, 3, 11, 30, }, + { 2, 0, 1, 3, 11, 30, }, + { 1, 0, 1, 3, 11, 30, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 32, }, + { 1, 0, 1, 3, 12, 32, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 32, }, + { 1, 0, 1, 3, 13, 32, 
}, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 1, 0, 1, 36, 30, }, + { 2, 1, 0, 1, 36, 32, }, + { 1, 1, 0, 1, 36, 32, }, + { 0, 1, 0, 1, 40, 30, }, + { 2, 1, 0, 1, 40, 32, }, + { 1, 1, 0, 1, 40, 32, }, + { 0, 1, 0, 1, 44, 30, }, + { 2, 1, 0, 1, 44, 32, }, + { 1, 1, 0, 1, 44, 32, }, + { 0, 1, 0, 1, 48, 30, }, + { 2, 1, 0, 1, 48, 32, }, + { 1, 1, 0, 1, 48, 32, }, + { 0, 1, 0, 1, 52, 36, }, + { 2, 1, 0, 1, 52, 32, }, + { 1, 1, 0, 1, 52, 32, }, + { 0, 1, 0, 1, 56, 34, }, + { 2, 1, 0, 1, 56, 32, }, + { 1, 1, 0, 1, 56, 32, }, + { 0, 1, 0, 1, 60, 32, }, + { 2, 1, 0, 1, 60, 32, }, + { 1, 1, 0, 1, 60, 32, }, + { 0, 1, 0, 1, 64, 28, }, + { 2, 1, 0, 1, 64, 32, }, + { 1, 1, 0, 1, 64, 32, }, + { 0, 1, 0, 1, 100, 30, }, + { 2, 1, 0, 1, 100, 32, }, + { 1, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 30, }, + { 2, 1, 0, 1, 104, 32, }, + { 1, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 32, }, + { 2, 1, 0, 1, 108, 32, }, + { 1, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 34, }, + { 2, 1, 0, 1, 112, 32, }, + { 1, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 34, }, + { 2, 1, 0, 1, 116, 32, }, + { 1, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 36, }, + { 2, 1, 0, 1, 120, 32, }, + { 1, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 34, }, + { 2, 1, 0, 1, 124, 32, }, + { 1, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 32, }, + { 2, 1, 0, 1, 128, 32, }, + { 1, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 30, }, + { 2, 1, 0, 1, 132, 32, }, + { 1, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 30, }, + { 2, 1, 0, 1, 136, 32, }, + { 1, 1, 0, 1, 136, 32, }, + { 0, 1, 0, 1, 140, 28, }, + { 2, 1, 0, 1, 140, 32, }, + { 1, 1, 0, 1, 140, 32, }, + { 0, 1, 0, 1, 149, 36, }, + { 2, 1, 0, 1, 149, 32, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 36, }, + { 2, 1, 0, 1, 153, 32, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 36, }, + { 2, 1, 0, 1, 157, 32, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 36, }, + { 2, 1, 0, 1, 161, 32, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 36, }, + { 2, 1, 0, 1, 165, 32, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 30, }, + { 2, 1, 0, 2, 36, 32, }, + { 1, 1, 0, 2, 36, 32, }, + { 0, 1, 0, 2, 40, 30, }, + { 2, 1, 0, 2, 40, 32, }, + { 1, 1, 0, 2, 40, 32, }, + { 0, 1, 0, 2, 44, 30, }, + { 2, 1, 0, 2, 44, 32, }, + { 1, 1, 0, 2, 44, 32, }, + { 0, 1, 0, 2, 48, 30, }, + { 2, 1, 0, 2, 48, 32, }, + { 1, 1, 0, 2, 48, 32, }, + { 0, 1, 0, 2, 52, 36, }, + { 2, 1, 0, 2, 52, 32, }, + { 1, 1, 0, 2, 52, 32, }, + { 0, 1, 0, 2, 56, 34, }, + { 2, 1, 0, 2, 56, 32, }, + { 1, 1, 0, 2, 56, 32, }, + { 0, 1, 0, 2, 60, 32, }, + { 2, 1, 0, 2, 60, 32, }, + { 1, 1, 0, 2, 60, 32, }, + { 0, 1, 0, 2, 64, 28, }, + { 2, 1, 0, 2, 64, 32, }, + { 1, 1, 0, 2, 64, 32, }, + { 0, 1, 0, 2, 100, 30, }, + { 2, 1, 0, 2, 100, 32, }, + { 1, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 30, }, + { 2, 1, 0, 2, 104, 32, }, + { 1, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 108, 32, }, + { 2, 1, 0, 2, 108, 32, }, + { 1, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 34, }, + { 2, 1, 0, 2, 112, 32, }, + { 1, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 34, }, + { 2, 1, 0, 2, 116, 32, }, + { 1, 1, 0, 2, 116, 32, }, + { 0, 1, 0, 2, 120, 36, }, + { 2, 1, 0, 2, 120, 32, }, + { 1, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 34, }, + { 2, 1, 0, 2, 124, 32, }, + { 1, 1, 0, 2, 124, 32, }, + { 0, 1, 0, 2, 128, 32, }, + { 2, 1, 0, 2, 128, 32, }, + { 1, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 30, }, + { 2, 1, 0, 2, 132, 32, }, + { 1, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 2, 136, 30, }, + { 2, 1, 0, 2, 136, 32, }, + { 1, 1, 0, 2, 
136, 32, }, + { 0, 1, 0, 2, 140, 28, }, + { 2, 1, 0, 2, 140, 32, }, + { 1, 1, 0, 2, 140, 32, }, + { 0, 1, 0, 2, 149, 36, }, + { 2, 1, 0, 2, 149, 32, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 36, }, + { 2, 1, 0, 2, 153, 32, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 36, }, + { 2, 1, 0, 2, 157, 32, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 36, }, + { 2, 1, 0, 2, 161, 32, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 36, }, + { 2, 1, 0, 2, 165, 32, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 28, }, + { 2, 1, 0, 3, 36, 30, }, + { 1, 1, 0, 3, 36, 30, }, + { 0, 1, 0, 3, 40, 28, }, + { 2, 1, 0, 3, 40, 30, }, + { 1, 1, 0, 3, 40, 30, }, + { 0, 1, 0, 3, 44, 28, }, + { 2, 1, 0, 3, 44, 30, }, + { 1, 1, 0, 3, 44, 30, }, + { 0, 1, 0, 3, 48, 28, }, + { 2, 1, 0, 3, 48, 30, }, + { 1, 1, 0, 3, 48, 30, }, + { 0, 1, 0, 3, 52, 34, }, + { 2, 1, 0, 3, 52, 30, }, + { 1, 1, 0, 3, 52, 30, }, + { 0, 1, 0, 3, 56, 32, }, + { 2, 1, 0, 3, 56, 30, }, + { 1, 1, 0, 3, 56, 30, }, + { 0, 1, 0, 3, 60, 30, }, + { 2, 1, 0, 3, 60, 30, }, + { 1, 1, 0, 3, 60, 30, }, + { 0, 1, 0, 3, 64, 26, }, + { 2, 1, 0, 3, 64, 30, }, + { 1, 1, 0, 3, 64, 30, }, + { 0, 1, 0, 3, 100, 28, }, + { 2, 1, 0, 3, 100, 30, }, + { 1, 1, 0, 3, 100, 30, }, + { 0, 1, 0, 3, 104, 28, }, + { 2, 1, 0, 3, 104, 30, }, + { 1, 1, 0, 3, 104, 30, }, + { 0, 1, 0, 3, 108, 30, }, + { 2, 1, 0, 3, 108, 30, }, + { 1, 1, 0, 3, 108, 30, }, + { 0, 1, 0, 3, 112, 32, }, + { 2, 1, 0, 3, 112, 30, }, + { 1, 1, 0, 3, 112, 30, }, + { 0, 1, 0, 3, 116, 32, }, + { 2, 1, 0, 3, 116, 30, }, + { 1, 1, 0, 3, 116, 30, }, + { 0, 1, 0, 3, 120, 34, }, + { 2, 1, 0, 3, 120, 30, }, + { 1, 1, 0, 3, 120, 30, }, + { 0, 1, 0, 3, 124, 32, }, + { 2, 1, 0, 3, 124, 30, }, + { 1, 1, 0, 3, 124, 30, }, + { 0, 1, 0, 3, 128, 30, }, + { 2, 1, 0, 3, 128, 30, }, + { 1, 1, 0, 3, 128, 30, }, + { 0, 1, 0, 3, 132, 28, }, + { 2, 1, 0, 3, 132, 30, }, + { 1, 1, 0, 3, 132, 30, }, + { 0, 1, 0, 3, 136, 28, }, + { 2, 1, 0, 3, 136, 30, }, + { 1, 1, 0, 3, 136, 30, }, + { 0, 1, 0, 3, 140, 26, }, + { 2, 1, 0, 3, 140, 30, }, + { 1, 1, 0, 3, 140, 30, }, + { 0, 1, 0, 3, 149, 34, }, + { 2, 1, 0, 3, 149, 30, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 34, }, + { 2, 1, 0, 3, 153, 30, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 34, }, + { 2, 1, 0, 3, 157, 30, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 34, }, + { 2, 1, 0, 3, 161, 30, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 34, }, + { 2, 1, 0, 3, 165, 30, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 1, 2, 38, 30, }, + { 2, 1, 1, 2, 38, 32, }, + { 1, 1, 1, 2, 38, 32, }, + { 0, 1, 1, 2, 46, 30, }, + { 2, 1, 1, 2, 46, 32, }, + { 1, 1, 1, 2, 46, 32, }, + { 0, 1, 1, 2, 54, 32, }, + { 2, 1, 1, 2, 54, 32, }, + { 1, 1, 1, 2, 54, 32, }, + { 0, 1, 1, 2, 62, 32, }, + { 2, 1, 1, 2, 62, 32, }, + { 1, 1, 1, 2, 62, 32, }, + { 0, 1, 1, 2, 102, 28, }, + { 2, 1, 1, 2, 102, 32, }, + { 1, 1, 1, 2, 102, 32, }, + { 0, 1, 1, 2, 110, 32, }, + { 2, 1, 1, 2, 110, 32, }, + { 1, 1, 1, 2, 110, 32, }, + { 0, 1, 1, 2, 118, 36, }, + { 2, 1, 1, 2, 118, 32, }, + { 1, 1, 1, 2, 118, 32, }, + { 0, 1, 1, 2, 126, 34, }, + { 2, 1, 1, 2, 126, 32, }, + { 1, 1, 1, 2, 126, 32, }, + { 0, 1, 1, 2, 134, 32, }, + { 2, 1, 1, 2, 134, 32, }, + { 1, 1, 1, 2, 134, 32, }, + { 0, 1, 1, 2, 151, 36, }, + { 2, 1, 1, 2, 151, 32, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 36, }, + { 2, 1, 1, 2, 159, 32, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 28, }, + { 2, 1, 1, 3, 38, 30, }, + { 1, 1, 1, 3, 38, 30, }, + { 0, 1, 1, 3, 46, 28, }, + { 2, 1, 1, 3, 46, 30, }, 
+ { 1, 1, 1, 3, 46, 30, }, + { 0, 1, 1, 3, 54, 30, }, + { 2, 1, 1, 3, 54, 30, }, + { 1, 1, 1, 3, 54, 30, }, + { 0, 1, 1, 3, 62, 30, }, + { 2, 1, 1, 3, 62, 30, }, + { 1, 1, 1, 3, 62, 30, }, + { 0, 1, 1, 3, 102, 26, }, + { 2, 1, 1, 3, 102, 30, }, + { 1, 1, 1, 3, 102, 30, }, + { 0, 1, 1, 3, 110, 30, }, + { 2, 1, 1, 3, 110, 30, }, + { 1, 1, 1, 3, 110, 30, }, + { 0, 1, 1, 3, 118, 34, }, + { 2, 1, 1, 3, 118, 30, }, + { 1, 1, 1, 3, 118, 30, }, + { 0, 1, 1, 3, 126, 32, }, + { 2, 1, 1, 3, 126, 30, }, + { 1, 1, 1, 3, 126, 30, }, + { 0, 1, 1, 3, 134, 30, }, + { 2, 1, 1, 3, 134, 30, }, + { 1, 1, 1, 3, 134, 30, }, + { 0, 1, 1, 3, 151, 34, }, + { 2, 1, 1, 3, 151, 30, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 34, }, + { 2, 1, 1, 3, 159, 30, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 2, 4, 42, 30, }, + { 2, 1, 2, 4, 42, 32, }, + { 1, 1, 2, 4, 42, 32, }, + { 0, 1, 2, 4, 58, 28, }, + { 2, 1, 2, 4, 58, 32, }, + { 1, 1, 2, 4, 58, 32, }, + { 0, 1, 2, 4, 106, 30, }, + { 2, 1, 2, 4, 106, 32, }, + { 1, 1, 2, 4, 106, 32, }, + { 0, 1, 2, 4, 122, 34, }, + { 2, 1, 2, 4, 122, 32, }, + { 1, 1, 2, 4, 122, 32, }, + { 0, 1, 2, 4, 155, 36, }, + { 2, 1, 2, 4, 155, 32, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 28, }, + { 2, 1, 2, 5, 42, 30, }, + { 1, 1, 2, 5, 42, 30, }, + { 0, 1, 2, 5, 58, 26, }, + { 2, 1, 2, 5, 58, 30, }, + { 1, 1, 2, 5, 58, 30, }, + { 0, 1, 2, 5, 106, 28, }, + { 2, 1, 2, 5, 106, 30, }, + { 1, 1, 2, 5, 106, 30, }, + { 0, 1, 2, 5, 122, 32, }, + { 2, 1, 2, 5, 122, 30, }, + { 1, 1, 2, 5, 122, 30, }, + { 0, 1, 2, 5, 155, 34, }, + { 2, 1, 2, 5, 155, 30, }, + { 1, 1, 2, 5, 155, 63, }, +}; + +RTW_DECL_TABLE_TXPWR_LMT(rtw8812a_txpwr_lmt); + +static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8812a[] = { + {0x0012, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0014, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x80, 0}, + {0x0015, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x01, 0}, + {0x0023, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x10, 0}, + {0x0046, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x00}, + {0x0043, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x00}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), 0}, + {0x0003, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), BIT(2)}, + {0x0301, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0x0024, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0028, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), BIT(3)}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8812a[] = { + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), 0}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), BIT(1)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + 
RTW_PWR_CMD_WRITE, BIT(7), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(0), 0}, + {0x0024, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0028, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_act_to_lps_8812a[] = { + {0x0301, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0xFF}, + {0x0522, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x7F}, + {0x05F8, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xFF, 0}, + {0x05F9, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xFF, 0}, + {0x05FA, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xFF, 0}, + {0x05FB, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xFF, 0}, + {0x0c00, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x04}, + {0x0e00, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x04}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_DELAY, 0, RTW_PWR_DELAY_US}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0100, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x03}, + {0x0101, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0553, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8812a[] = { + {0x0c00, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x04}, + {0x0e00, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x04}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_DELAY, 0, RTW_PWR_DELAY_US}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0007, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x2A}, + {0x0008, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x02, 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + 
RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8812a[] = { + {0x0003, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), 0}, + {0x0080, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x05}, + {0x0042, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xF0, 0xcc}, + {0x0042, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xF0, 0xEC}, + {0x0043, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x07}, + {0x0045, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x00}, + {0x0046, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0xff}, + {0x0047, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0x0014, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x80, BIT(7)}, + {0x0015, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x01, BIT(0)}, + {0x0012, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x01, 0}, + {0x0023, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x10, BIT(4)}, + {0x0008, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x02, 0}, + {0x0007, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x20}, + {0x001f, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0076, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), BIT(3)}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +const struct rtw_pwr_seq_cmd * const card_enable_flow_8812a[] = { + trans_carddis_to_cardemu_8812a, + trans_cardemu_to_act_8812a, + NULL +}; + +const struct rtw_pwr_seq_cmd * const enter_lps_flow_8812a[] = { + trans_act_to_lps_8812a, + NULL +}; + +const struct rtw_pwr_seq_cmd * const card_disable_flow_8812a[] = { + trans_act_to_cardemu_8812a, + trans_cardemu_to_carddis_8812a, + NULL +}; + +static const u8 rtw8812a_pwrtrk_5gb_n[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, + 12, 13, 13, 14, 14, 14, 14, 14, 14}, + {0, 1, 1, 2, 2, 3, 4, 4, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, + 12, 13, 13, 14, 14, 14, 14, 14, 14}, + {0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 11, 12, 12, 13, + 13, 14, 14, 15, 16, 16, 16, 16, 16}, +}; + +static const u8 rtw8812a_pwrtrk_5gb_p[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, +}; + +static const u8 rtw8812a_pwrtrk_5ga_n[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, + 12, 13, 13, 14, 15, 15, 15, 15, 15}, + {0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 
10, 10, 11, 11, 12, + 12, 13, 13, 14, 15, 15, 15, 15, 15}, + {0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, + 12, 13, 13, 14, 15, 15, 15, 15, 15}, +}; + +static const u8 rtw8812a_pwrtrk_5ga_p[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 3, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11, 11, 12, 12, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, +}; + +static const u8 rtw8812a_pwrtrk_2gb_n[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8812a_pwrtrk_2gb_p[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, + 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 +}; + +static const u8 rtw8812a_pwrtrk_2ga_n[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 7, 7, 8, 8, 9, 10, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8812a_pwrtrk_2ga_p[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 +}; + +static const u8 rtw8812a_pwrtrk_2g_cck_b_n[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8812a_pwrtrk_2g_cck_b_p[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, + 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 +}; + +static const u8 rtw8812a_pwrtrk_2g_cck_a_n[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 7, 7, 8, 8, 9, 10, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8812a_pwrtrk_2g_cck_a_p[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 +}; + +const struct rtw_pwr_track_tbl rtw8812a_rtw_pwr_track_tbl = { + .pwrtrk_5gb_n[0] = rtw8812a_pwrtrk_5gb_n[0], + .pwrtrk_5gb_n[1] = rtw8812a_pwrtrk_5gb_n[1], + .pwrtrk_5gb_n[2] = rtw8812a_pwrtrk_5gb_n[2], + .pwrtrk_5gb_p[0] = rtw8812a_pwrtrk_5gb_p[0], + .pwrtrk_5gb_p[1] = rtw8812a_pwrtrk_5gb_p[1], + .pwrtrk_5gb_p[2] = rtw8812a_pwrtrk_5gb_p[2], + .pwrtrk_5ga_n[0] = rtw8812a_pwrtrk_5ga_n[0], + .pwrtrk_5ga_n[1] = rtw8812a_pwrtrk_5ga_n[1], + .pwrtrk_5ga_n[2] = rtw8812a_pwrtrk_5ga_n[2], + .pwrtrk_5ga_p[0] = rtw8812a_pwrtrk_5ga_p[0], + .pwrtrk_5ga_p[1] = rtw8812a_pwrtrk_5ga_p[1], + .pwrtrk_5ga_p[2] = rtw8812a_pwrtrk_5ga_p[2], + .pwrtrk_2gb_n = rtw8812a_pwrtrk_2gb_n, + .pwrtrk_2gb_p = rtw8812a_pwrtrk_2gb_p, + .pwrtrk_2ga_n = rtw8812a_pwrtrk_2ga_n, + .pwrtrk_2ga_p = rtw8812a_pwrtrk_2ga_p, + .pwrtrk_2g_cckb_n = rtw8812a_pwrtrk_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8812a_pwrtrk_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8812a_pwrtrk_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8812a_pwrtrk_2g_cck_a_p, +}; + +static const u8 rtw8812a_pwrtrk_rfe3_5gb_n[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12, 13, + 13, 14, 15, 16, 16, 17, 17, 18, 18}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, + 12, 14, 13, 13, 14, 14, 14, 15, 15}, + {0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, + 12, 13, 13, 14, 14, 15, 15, 16, 16}, +}; + +static const u8 rtw8812a_pwrtrk_rfe3_5gb_p[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 10, 10, 11, 11, 11, + 11, 
11, 11, 11, 11, 11, 11, 11, 11}, +}; + +static const u8 rtw8812a_pwrtrk_rfe3_5ga_n[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, + 13, 14, 15, 16, 16, 17, 17, 18, 18}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12, + 12, 13, 13, 14, 15, 16, 16, 17, 17}, + {0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12, 13, + 13, 14, 14, 15, 15, 16, 17, 18, 18}, +}; + +static const u8 rtw8812a_pwrtrk_rfe3_5ga_p[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, + {0, 1, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, 11}, +}; + +static const u8 rtw8812a_pwrtrk_rfe3_2gb_n[] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, + 7, 7, 8, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15 +}; + +static const u8 rtw8812a_pwrtrk_rfe3_2gb_p[] = { + 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, + 6, 7, 7, 8, 9, 10, 10, 10, 10, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8812a_pwrtrk_rfe3_2ga_n[] = { + 0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, + 10, 10, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15 +}; + +static const u8 rtw8812a_pwrtrk_rfe3_2ga_p[] = { + 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, + 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8812a_pwrtrk_rfe3_2g_cck_b_n[] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, + 7, 7, 8, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15 +}; + +static const u8 rtw8812a_pwrtrk_rfe3_2g_cck_b_p[] = { + 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, + 6, 7, 7, 8, 9, 10, 10, 10, 10, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8812a_pwrtrk_rfe3_2g_cck_a_n[] = { + 0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, + 10, 10, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 15, 15 +}; + +static const u8 rtw8812a_pwrtrk_rfe3_2g_cck_a_p[] = { + 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, + 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11 +}; + +const struct rtw_pwr_track_tbl rtw8812a_rtw_pwr_track_rfe3_tbl = { + .pwrtrk_5gb_n[0] = rtw8812a_pwrtrk_rfe3_5gb_n[0], + .pwrtrk_5gb_n[1] = rtw8812a_pwrtrk_rfe3_5gb_n[1], + .pwrtrk_5gb_n[2] = rtw8812a_pwrtrk_rfe3_5gb_n[2], + .pwrtrk_5gb_p[0] = rtw8812a_pwrtrk_rfe3_5gb_p[0], + .pwrtrk_5gb_p[1] = rtw8812a_pwrtrk_rfe3_5gb_p[1], + .pwrtrk_5gb_p[2] = rtw8812a_pwrtrk_rfe3_5gb_p[2], + .pwrtrk_5ga_n[0] = rtw8812a_pwrtrk_rfe3_5ga_n[0], + .pwrtrk_5ga_n[1] = rtw8812a_pwrtrk_rfe3_5ga_n[1], + .pwrtrk_5ga_n[2] = rtw8812a_pwrtrk_rfe3_5ga_n[2], + .pwrtrk_5ga_p[0] = rtw8812a_pwrtrk_rfe3_5ga_p[0], + .pwrtrk_5ga_p[1] = rtw8812a_pwrtrk_rfe3_5ga_p[1], + .pwrtrk_5ga_p[2] = rtw8812a_pwrtrk_rfe3_5ga_p[2], + .pwrtrk_2gb_n = rtw8812a_pwrtrk_rfe3_2gb_n, + .pwrtrk_2gb_p = rtw8812a_pwrtrk_rfe3_2gb_p, + .pwrtrk_2ga_n = rtw8812a_pwrtrk_rfe3_2ga_n, + .pwrtrk_2ga_p = rtw8812a_pwrtrk_rfe3_2ga_p, + .pwrtrk_2g_cckb_n = rtw8812a_pwrtrk_rfe3_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8812a_pwrtrk_rfe3_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8812a_pwrtrk_rfe3_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8812a_pwrtrk_rfe3_2g_cck_a_p, +}; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812a_table.h b/drivers/net/wireless/realtek/rtw88/rtw8812a_table.h new file mode 100644 index 000000000000..f7ab5e4cf059 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8812a_table.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 OR 
BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW8812A_TABLE_H__ +#define __RTW8812A_TABLE_H__ + +extern const struct rtw_table rtw8812a_mac_tbl; +extern const struct rtw_table rtw8812a_agc_tbl; +extern const struct rtw_table rtw8812a_agc_diff_lb_tbl; +extern const struct rtw_table rtw8812a_agc_diff_hb_tbl; +extern const struct rtw_table rtw8812a_bb_tbl; +extern const struct rtw_table rtw8812a_bb_pg_tbl; +extern const struct rtw_table rtw8812a_bb_pg_rfe3_tbl; +extern const struct rtw_table rtw8812a_rf_a_tbl; +extern const struct rtw_table rtw8812a_rf_b_tbl; +extern const struct rtw_table rtw8812a_txpwr_lmt_tbl; + +extern const struct rtw_pwr_seq_cmd * const card_enable_flow_8812a[]; +extern const struct rtw_pwr_seq_cmd * const enter_lps_flow_8812a[]; +extern const struct rtw_pwr_seq_cmd * const card_disable_flow_8812a[]; + +extern const struct rtw_pwr_track_tbl rtw8812a_rtw_pwr_track_tbl; +extern const struct rtw_pwr_track_tbl rtw8812a_rtw_pwr_track_rfe3_tbl; + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8812au.c b/drivers/net/wireless/realtek/rtw88/rtw8812au.c new file mode 100644 index 000000000000..4da69590a423 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8812au.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include <linux/module.h> +#include <linux/usb.h> +#include "main.h" +#include "rtw8812a.h" +#include "usb.h" + +static const struct usb_device_id rtw_8812au_id_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x2604, 0x0012, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8812a_hw_spec) }, + {}, +}; +MODULE_DEVICE_TABLE(usb, rtw_8812au_id_table); + +static struct usb_driver rtw_8812au_driver = { + .name = "rtw_8812au", + .id_table = rtw_8812au_id_table, + .probe = rtw_usb_probe, + .disconnect = rtw_usb_disconnect, +}; +module_usb_driver(rtw_8812au_driver); + +MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8812au driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a.c b/drivers/net/wireless/realtek/rtw88/rtw8821a.c new file mode 100644 index 000000000000..db242c9ad68f --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8821a.c @@ -0,0 +1,1197 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "main.h" +#include "coex.h" +#include "phy.h" +#include "reg.h" +#include "rtw88xxa.h" +#include "rtw8821a.h" +#include "rtw8821a_table.h" +#include "tx.h" + +static void rtw8821a_power_off(struct rtw_dev *rtwdev) +{ + rtw88xxa_power_off(rtwdev, enter_lps_flow_8821a); +} + +static s8 rtw8821a_cck_rx_pwr(u8 lna_idx, u8 vga_idx) +{ + static const s8 lna_gain_table[] = {15, -1, -17, 0, -30, -38}; + s8 rx_pwr_all = 0; + s8 lna_gain; + + switch (lna_idx) { + case 5: + case 4: + case 2: + case 1: + case 0: + lna_gain = lna_gain_table[lna_idx]; + rx_pwr_all = lna_gain - 2 * vga_idx; + break; + default: + break; + } + + return rx_pwr_all; +} + +static void rtw8821a_query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, + struct rtw_rx_pkt_stat *pkt_stat) +{ + rtw88xxa_query_phy_status(rtwdev, phy_status, pkt_stat, + rtw8821a_cck_rx_pwr); +} + +static void rtw8821a_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) +{ +} + +#define CAL_NUM_8821A 3 +#define MACBB_REG_NUM_8821A 8 +#define AFE_REG_NUM_8821A 4 +#define RF_REG_NUM_8821A 3 + +static void rtw8821a_iqk_backup_rf(struct rtw_dev *rtwdev, 
u32 *rfa_backup, + const u32 *backup_rf_reg, u32 rf_num) +{ + u32 i; + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + /* Save RF Parameters */ + for (i = 0; i < rf_num; i++) + rfa_backup[i] = rtw_read_rf(rtwdev, RF_PATH_A, + backup_rf_reg[i], MASKDWORD); +} + +static void rtw8821a_iqk_restore_rf(struct rtw_dev *rtwdev, + const u32 *backup_rf_reg, + u32 *RF_backup, u32 rf_reg_num) +{ + u32 i; + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + for (i = 0; i < rf_reg_num; i++) + rtw_write_rf(rtwdev, RF_PATH_A, backup_rf_reg[i], + RFREG_MASK, RF_backup[i]); +} + +static void rtw8821a_iqk_restore_afe(struct rtw_dev *rtwdev, u32 *afe_backup, + const u32 *backup_afe_reg, u32 afe_num) +{ + u32 i; + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + /* Reload AFE Parameters */ + for (i = 0; i < afe_num; i++) + rtw_write32(rtwdev, backup_afe_reg[i], afe_backup[i]); + + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + rtw_write32(rtwdev, REG_OFDM0_XA_TX_IQ_IMBALANCE, 0x0); + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x0); + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, 0x0); + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x3c000000); + rtw_write32(rtwdev, REG_LSSI_WRITE_A, 0x00000080); + rtw_write32(rtwdev, REG_TXAGCIDX, 0x00000000); + rtw_write32(rtwdev, REG_IQK_DPD_CFG, 0x20040000); + rtw_write32(rtwdev, REG_CFG_PMPD, 0x20000000); + rtw_write32(rtwdev, REG_RFECTL_A, 0x0); +} + +static void rtw8821a_iqk_rx_fill(struct rtw_dev *rtwdev, + unsigned int rx_x, unsigned int rx_y) +{ + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_A, + 0x000003ff, rx_x >> 1); + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_A, + 0x03ff0000, (rx_y >> 1) & 0x3ff); +} + +static void rtw8821a_iqk_tx_fill(struct rtw_dev *rtwdev, + unsigned int tx_x, unsigned int tx_y) +{ + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + rtw_write32(rtwdev, REG_LSSI_WRITE_A, 0x00000080); + rtw_write32(rtwdev, REG_IQK_DPD_CFG, 0x20040000); + rtw_write32(rtwdev, REG_CFG_PMPD, 0x20000000); + rtw_write32_mask(rtwdev, REG_IQC_Y, 0x000007ff, tx_y); + rtw_write32_mask(rtwdev, REG_IQC_X, 0x000007ff, tx_x); +} + +static void rtw8821a_iqk_tx_vdf_true(struct rtw_dev *rtwdev, u32 cal, + bool *tx0iqkok, + int tx_x0[CAL_NUM_8821A], + int tx_y0[CAL_NUM_8821A]) +{ + u32 cal_retry, delay_count, iqk_ready, tx_fail; + int tx_dt[3], vdf_y[3], vdf_x[3]; + int k; + + for (k = 0; k < 3; k++) { + switch (k) { + case 0: + /* TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtw_write32(rtwdev, REG_OFDM0_XA_TX_IQ_IMBALANCE, + 0x18008c38); + /* RX_Tone_idx[9:0], RxK_Mask[29] */ + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x38008c38); + rtw_write32_mask(rtwdev, REG_INTPO_SETA, BIT(31), 0x0); + break; + case 1: + rtw_write32_mask(rtwdev, REG_OFDM0_XA_TX_IQ_IMBALANCE, + BIT(28), 0x0); + rtw_write32_mask(rtwdev, REG_OFDM0_A_TX_AFE, + BIT(28), 0x0); + rtw_write32_mask(rtwdev, REG_INTPO_SETA, BIT(31), 0x0); + break; + case 2: + rtw_dbg(rtwdev, RTW_DBG_RFK, + "vdf_y[1] = %x vdf_y[0] = %x\n", + vdf_y[1] >> 21 & 0x00007ff, + vdf_y[0] >> 21 & 0x00007ff); + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "vdf_x[1] = %x vdf_x[0] = %x\n", + vdf_x[1] >> 21 & 0x00007ff, + vdf_x[0] >> 21 & 0x00007ff); + + tx_dt[cal] = (vdf_y[1] >> 20) - (vdf_y[0] >> 20); + tx_dt[cal] = (16 * tx_dt[cal]) * 10000 / 15708; + tx_dt[cal] = (tx_dt[cal] >> 1) + (tx_dt[cal] & BIT(0)); + 
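+					/* Descriptive note (inferred, not stated in the source): 15708 ~= (pi/2) * 1e4,
+					 * so the two lines above take the delta between the two VDF tone results
+					 * (vdf_y[1] - vdf_y[0], from bit 20 up) and scale it by 16 * 2/pi, and the
+					 * final line divides by two with rounding rather than truncation. The value
+					 * is presumably a TX timing/group-delay correction; it is written into
+					 * REG_INTPO_SETA bits [29:16] a few lines below.
+					 */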
+ /* TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtw_write32(rtwdev, REG_OFDM0_XA_TX_IQ_IMBALANCE, + 0x18008c20); + /* RX_Tone_idx[9:0], RxK_Mask[29] */ + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x38008c20); + rtw_write32_mask(rtwdev, REG_INTPO_SETA, BIT(31), 0x1); + rtw_write32_mask(rtwdev, REG_INTPO_SETA, 0x3fff0000, + tx_dt[cal] & 0x00003fff); + break; + } + + rtw_write32(rtwdev, REG_RFECTL_A, 0x00100000); + + for (cal_retry = 0; cal_retry < 10; cal_retry++) { + /* one shot */ + rtw_write32(rtwdev, REG_IQK_COM64, 0xfa000000); + rtw_write32(rtwdev, REG_IQK_COM64, 0xf8000000); + + mdelay(10); + + rtw_write32(rtwdev, REG_RFECTL_A, 0x00000000); + + for (delay_count = 0; delay_count < 20; delay_count++) { + iqk_ready = rtw_read32_mask(rtwdev, + REG_IQKA_END, + BIT(10)); + + /* Originally: if (~iqk_ready || delay_count > 20) + * that looks like a typo so make it more explicit + */ + iqk_ready = true; + + if (iqk_ready) + break; + + mdelay(1); + } + + if (delay_count < 20) { + /* ============TXIQK Check============== */ + tx_fail = rtw_read32_mask(rtwdev, + REG_IQKA_END, + BIT(12)); + + /* Originally: if (~tx_fail) { + * It looks like a typo, so make it more explicit. + */ + tx_fail = false; + + if (!tx_fail) { + rtw_write32(rtwdev, REG_RFECTL_A, + 0x02000000); + vdf_x[k] = rtw_read32_mask(rtwdev, + REG_IQKA_END, + 0x07ff0000); + vdf_x[k] <<= 21; + + rtw_write32(rtwdev, REG_RFECTL_A, + 0x04000000); + vdf_y[k] = rtw_read32_mask(rtwdev, + REG_IQKA_END, + 0x07ff0000); + vdf_y[k] <<= 21; + + *tx0iqkok = true; + break; + } + + rtw_write32_mask(rtwdev, REG_IQC_Y, + 0x000007ff, 0x0); + rtw_write32_mask(rtwdev, REG_IQC_X, + 0x000007ff, 0x200); + } + + *tx0iqkok = false; + } + } + + if (k == 3) { + tx_x0[cal] = vdf_x[k - 1]; + tx_y0[cal] = vdf_y[k - 1]; + } +} + +static void rtw8821a_iqk_tx_vdf_false(struct rtw_dev *rtwdev, u32 cal, + bool *tx0iqkok, + int tx_x0[CAL_NUM_8821A], + int tx_y0[CAL_NUM_8821A]) +{ + u32 cal_retry, delay_count, iqk_ready, tx_fail; + + /* TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtw_write32(rtwdev, REG_OFDM0_XA_TX_IQ_IMBALANCE, 0x18008c10); + /* RX_Tone_idx[9:0], RxK_Mask[29] */ + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x38008c10); + rtw_write32(rtwdev, REG_RFECTL_A, 0x00100000); + + for (cal_retry = 0; cal_retry < 10; cal_retry++) { + /* one shot */ + rtw_write32(rtwdev, REG_IQK_COM64, 0xfa000000); + rtw_write32(rtwdev, REG_IQK_COM64, 0xf8000000); + + mdelay(10); + rtw_write32(rtwdev, REG_RFECTL_A, 0x00000000); + + for (delay_count = 0; delay_count < 20; delay_count++) { + iqk_ready = rtw_read32_mask(rtwdev, REG_IQKA_END, BIT(10)); + + /* Originally: if (~iqk_ready || delay_count > 20) + * that looks like a typo so make it more explicit + */ + iqk_ready = true; + + if (iqk_ready) + break; + + mdelay(1); + } + + if (delay_count < 20) { + /* ============TXIQK Check============== */ + tx_fail = rtw_read32_mask(rtwdev, REG_IQKA_END, BIT(12)); + + /* Originally: if (~tx_fail) { + * It looks like a typo, so make it more explicit. 
+ */ + tx_fail = false; + + if (!tx_fail) { + rtw_write32(rtwdev, REG_RFECTL_A, 0x02000000); + tx_x0[cal] = rtw_read32_mask(rtwdev, REG_IQKA_END, + 0x07ff0000); + tx_x0[cal] <<= 21; + + rtw_write32(rtwdev, REG_RFECTL_A, 0x04000000); + tx_y0[cal] = rtw_read32_mask(rtwdev, REG_IQKA_END, + 0x07ff0000); + tx_y0[cal] <<= 21; + + *tx0iqkok = true; + break; + } + + rtw_write32_mask(rtwdev, REG_IQC_Y, 0x000007ff, 0x0); + rtw_write32_mask(rtwdev, REG_IQC_X, 0x000007ff, 0x200); + } + + *tx0iqkok = false; + } +} + +static void rtw8821a_iqk_rx(struct rtw_dev *rtwdev, u32 cal, bool *rx0iqkok, + int rx_x0[CAL_NUM_8821A], + int rx_y0[CAL_NUM_8821A]) +{ + u32 cal_retry, delay_count, iqk_ready, rx_fail; + + rtw_write32(rtwdev, REG_RFECTL_A, 0x00100000); + + for (cal_retry = 0; cal_retry < 10; cal_retry++) { + /* one shot */ + rtw_write32(rtwdev, REG_IQK_COM64, 0xfa000000); + rtw_write32(rtwdev, REG_IQK_COM64, 0xf8000000); + + mdelay(10); + + rtw_write32(rtwdev, REG_RFECTL_A, 0x00000000); + + for (delay_count = 0; delay_count < 20; delay_count++) { + iqk_ready = rtw_read32_mask(rtwdev, REG_IQKA_END, BIT(10)); + + /* Originally: if (~iqk_ready || delay_count > 20) + * that looks like a typo so make it more explicit + */ + iqk_ready = true; + + if (iqk_ready) + break; + + mdelay(1); + } + + if (delay_count < 20) { + /* ============RXIQK Check============== */ + rx_fail = rtw_read32_mask(rtwdev, REG_IQKA_END, BIT(11)); + if (!rx_fail) { + rtw_write32(rtwdev, REG_RFECTL_A, 0x06000000); + rx_x0[cal] = rtw_read32_mask(rtwdev, REG_IQKA_END, + 0x07ff0000); + rx_x0[cal] <<= 21; + + rtw_write32(rtwdev, REG_RFECTL_A, 0x08000000); + rx_y0[cal] = rtw_read32_mask(rtwdev, REG_IQKA_END, + 0x07ff0000); + rx_y0[cal] <<= 21; + + *rx0iqkok = true; + break; + } + + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_A, + 0x000003ff, 0x200 >> 1); + rtw_write32_mask(rtwdev, REG_RX_IQC_AB_A, + 0x03ff0000, 0x0 >> 1); + } + + *rx0iqkok = false; + } +} + +static void rtw8821a_iqk(struct rtw_dev *rtwdev) +{ + int tx_average = 0, rx_average = 0, rx_iqk_loop = 0; + const struct rtw_efuse *efuse = &rtwdev->efuse; + int tx_x = 0, tx_y = 0, rx_x = 0, rx_y = 0; + const struct rtw_hal *hal = &rtwdev->hal; + bool tx0iqkok = false, rx0iqkok = false; + int rx_x_temp = 0, rx_y_temp = 0; + int rx_x0[2][CAL_NUM_8821A]; + int rx_y0[2][CAL_NUM_8821A]; + int tx_x0[CAL_NUM_8821A]; + int tx_y0[CAL_NUM_8821A]; + bool rx_finish1 = false; + bool rx_finish2 = false; + bool vdf_enable; + u32 cal; + int i; + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "band_width = %d, ext_pa = %d, ext_pa_5g = %d\n", + hal->current_band_width, efuse->ext_pa_2g, efuse->ext_pa_5g); + + vdf_enable = hal->current_band_width == RTW_CHANNEL_WIDTH_80; + + for (cal = 0; cal < CAL_NUM_8821A; cal++) { + /* path-A LOK */ + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + /* ========path-A AFE all on======== */ + /* Port 0 DAC/ADC on */ + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x77777777); + rtw_write32(rtwdev, REG_AFE_PWR2_A, 0x77777777); + + rtw_write32(rtwdev, REG_RX_WAIT_CCA_TX_CCK_RFON_A, 0x19791979); + + /* hardware 3-wire off */ + rtw_write32_mask(rtwdev, REG_3WIRE_SWA, 0xf, 0x4); + + /* LOK setting */ + + /* 1. DAC/ADC sampling rate (160 MHz) */ + rtw_write32_mask(rtwdev, REG_CK_MONHA, GENMASK(26, 24), 0x7); + + /* 2. 
LoK RF setting (at BW = 20M) */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80002); + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, 0x00c00, 0x3); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_ADDR, RFREG_MASK, + 0x20000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA0, RFREG_MASK, + 0x0003f); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA1, RFREG_MASK, + 0xf3fc3); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_TXA_PREPAD, RFREG_MASK, + 0x931d5); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RXBB2, RFREG_MASK, 0x8a001); + rtw_write32(rtwdev, REG_DAC_RSTB, 0x00008000); + rtw_write32_mask(rtwdev, REG_TXAGCIDX, BIT(0), 0x1); + /* TX (X,Y) */ + rtw_write32(rtwdev, REG_IQK_COM00, 0x29002000); + /* RX (X,Y) */ + rtw_write32(rtwdev, REG_IQK_COM32, 0xa9002000); + /* [0]:AGC_en, [15]:idac_K_Mask */ + rtw_write32(rtwdev, REG_IQK_COM96, 0x00462910); + + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + if (efuse->ext_pa_5g) + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, + 0x821403f7); + else + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, + 0x821403f4); + + if (hal->current_band_type == RTW_BAND_5G) + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x68163e96); + else + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x28163e96); + + /* TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtw_write32(rtwdev, REG_OFDM0_XA_TX_IQ_IMBALANCE, 0x18008c10); + /* RX_Tone_idx[9:0], RxK_Mask[29] */ + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x38008c10); + rtw_write32(rtwdev, REG_RFECTL_A, 0x00100000); + rtw_write32(rtwdev, REG_IQK_COM64, 0xfa000000); + rtw_write32(rtwdev, REG_IQK_COM64, 0xf8000000); + + mdelay(10); + rtw_write32(rtwdev, REG_RFECTL_A, 0x00000000); + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + rtw_write_rf(rtwdev, RF_PATH_A, RF_TXMOD, 0x7fe00, + rtw_read_rf(rtwdev, RF_PATH_A, RF_DTXLOK, 0xffc00)); + + if (hal->current_band_width == RTW_CHANNEL_WIDTH_40) + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, + RF18_BW_MASK, 0x1); + else if (hal->current_band_width == RTW_CHANNEL_WIDTH_80) + rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, + RF18_BW_MASK, 0x0); + + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + /* 3. 
TX RF setting */ + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_ADDR, RFREG_MASK, + 0x20000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA0, RFREG_MASK, + 0x0003f); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA1, RFREG_MASK, + 0xf3fc3); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_TXA_PREPAD, RFREG_MASK, 0x931d5); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RXBB2, RFREG_MASK, 0x8a001); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x00000); + rtw_write32(rtwdev, REG_DAC_RSTB, 0x00008000); + rtw_write32_mask(rtwdev, REG_TXAGCIDX, BIT(0), 0x1); + /* TX (X,Y) */ + rtw_write32(rtwdev, REG_IQK_COM00, 0x29002000); + /* RX (X,Y) */ + rtw_write32(rtwdev, REG_IQK_COM32, 0xa9002000); + /* [0]:AGC_en, [15]:idac_K_Mask */ + rtw_write32(rtwdev, REG_IQK_COM96, 0x0046a910); + + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + if (efuse->ext_pa_5g) + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, + 0x821403f7); + else + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, + 0x821403e3); + + if (hal->current_band_type == RTW_BAND_5G) + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x40163e96); + else + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x00163e96); + + if (vdf_enable) + rtw8821a_iqk_tx_vdf_true(rtwdev, cal, &tx0iqkok, + tx_x0, tx_y0); + else + rtw8821a_iqk_tx_vdf_false(rtwdev, cal, &tx0iqkok, + tx_x0, tx_y0); + + if (!tx0iqkok) + break; /* TXK fail, Don't do RXK */ + + /* ====== RX IQK ====== */ + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + /* 1. RX RF setting */ + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_ADDR, RFREG_MASK, + 0x30000); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA0, RFREG_MASK, + 0x0002f); + rtw_write_rf(rtwdev, RF_PATH_A, RF_MODE_TABLE_DATA1, RFREG_MASK, + 0xfffbb); + rtw_write_rf(rtwdev, RF_PATH_A, RF_RXBB2, RFREG_MASK, 0x88001); + rtw_write_rf(rtwdev, RF_PATH_A, RF_TXA_PREPAD, RFREG_MASK, 0x931d8); + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x00000); + + rtw_write32_mask(rtwdev, REG_IQK_COM00, 0x03FF8000, + (tx_x0[cal] >> 21) & 0x000007ff); + rtw_write32_mask(rtwdev, REG_IQK_COM00, 0x000007FF, + (tx_y0[cal] >> 21) & 0x000007ff); + rtw_write32_mask(rtwdev, REG_IQK_COM00, BIT(31), 0x1); + rtw_write32_mask(rtwdev, REG_IQK_COM00, BIT(31), 0x0); + rtw_write32(rtwdev, REG_DAC_RSTB, 0x00008000); + rtw_write32(rtwdev, REG_IQK_COM96, 0x0046a911); + + /* [31] = 1 --> Page C1 */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x1); + + /* TX_Tone_idx[9:0], TxK_Mask[29] TX_Tone = 16 */ + rtw_write32(rtwdev, REG_OFDM0_XA_TX_IQ_IMBALANCE, 0x38008c10); + /* RX_Tone_idx[9:0], RxK_Mask[29] */ + rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x18008c10); + rtw_write32(rtwdev, REG_OFDM0_XB_TX_IQ_IMBALANCE, 0x02140119); + + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) + rx_iqk_loop = 2; /* for 2% fail; */ + else + rx_iqk_loop = 1; + + for (i = 0; i < rx_iqk_loop; i++) { + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE && i == 0) + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x28161100); /* Good */ + else + rtw_write32(rtwdev, REG_TSSI_TRK_SW, 0x28160d00); + + rtw8821a_iqk_rx(rtwdev, cal, &rx0iqkok, + rx_x0[i], rx_y0[i]); + } + + if (tx0iqkok) + tx_average++; + if (rx0iqkok) + rx_average++; + } + + /* FillIQK Result */ + + if (tx_average == 0) + return; + + for (i = 0; i < tx_average; i++) + rtw_dbg(rtwdev, 
RTW_DBG_RFK, + "tx_x0[%d] = %x ;; tx_y0[%d] = %x\n", + i, (tx_x0[i] >> 21) & 0x000007ff, + i, (tx_y0[i] >> 21) & 0x000007ff); + + if (rtw88xxa_iqk_finish(tx_average, 3, tx_x0, tx_y0, + &tx_x, &tx_y, true, true)) + rtw8821a_iqk_tx_fill(rtwdev, tx_x, tx_y); + else + rtw8821a_iqk_tx_fill(rtwdev, 0x200, 0x0); + + if (rx_average == 0) + return; + + for (i = 0; i < rx_average; i++) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "rx_x0[0][%d] = %x ;; rx_y0[0][%d] = %x\n", + i, (rx_x0[0][i] >> 21) & 0x000007ff, + i, (rx_y0[0][i] >> 21) & 0x000007ff); + + if (rx_iqk_loop == 2) + rtw_dbg(rtwdev, RTW_DBG_RFK, + "rx_x0[1][%d] = %x ;; rx_y0[1][%d] = %x\n", + i, (rx_x0[1][i] >> 21) & 0x000007ff, + i, (rx_y0[1][i] >> 21) & 0x000007ff); + } + + rx_finish1 = rtw88xxa_iqk_finish(rx_average, 4, rx_x0[0], rx_y0[0], + &rx_x_temp, &rx_y_temp, true, true); + + if (rx_finish1) { + rx_x = rx_x_temp; + rx_y = rx_y_temp; + } + + if (rx_iqk_loop == 2) { + rx_finish2 = rtw88xxa_iqk_finish(rx_average, 4, + rx_x0[1], rx_y0[1], + &rx_x, &rx_y, true, true); + + if (rx_finish1 && rx_finish2) { + rx_x = (rx_x + rx_x_temp) / 2; + rx_y = (rx_y + rx_y_temp) / 2; + } + } + + if (rx_finish1 || rx_finish2) + rtw8821a_iqk_rx_fill(rtwdev, rx_x, rx_y); + else + rtw8821a_iqk_rx_fill(rtwdev, 0x200, 0x0); +} + +static void rtw8821a_do_iqk(struct rtw_dev *rtwdev) +{ + static const u32 backup_macbb_reg[MACBB_REG_NUM_8821A] = { + 0x520, 0x550, 0x808, 0xa04, 0x90c, 0xc00, 0x838, 0x82c + }; + static const u32 backup_afe_reg[AFE_REG_NUM_8821A] = { + 0xc5c, 0xc60, 0xc64, 0xc68 + }; + static const u32 backup_rf_reg[RF_REG_NUM_8821A] = { + 0x65, 0x8f, 0x0 + }; + u32 macbb_backup[MACBB_REG_NUM_8821A]; + u32 afe_backup[AFE_REG_NUM_8821A]; + u32 rfa_backup[RF_REG_NUM_8821A]; + + rtw88xxa_iqk_backup_mac_bb(rtwdev, macbb_backup, + backup_macbb_reg, MACBB_REG_NUM_8821A); + rtw88xxa_iqk_backup_afe(rtwdev, afe_backup, + backup_afe_reg, AFE_REG_NUM_8821A); + rtw8821a_iqk_backup_rf(rtwdev, rfa_backup, + backup_rf_reg, RF_REG_NUM_8821A); + + rtw88xxa_iqk_configure_mac(rtwdev); + + rtw8821a_iqk(rtwdev); + + rtw8821a_iqk_restore_rf(rtwdev, backup_rf_reg, + rfa_backup, RF_REG_NUM_8821A); + rtw8821a_iqk_restore_afe(rtwdev, afe_backup, + backup_afe_reg, AFE_REG_NUM_8821A); + rtw88xxa_iqk_restore_mac_bb(rtwdev, macbb_backup, + backup_macbb_reg, MACBB_REG_NUM_8821A); +} + +static void rtw8821a_phy_calibration(struct rtw_dev *rtwdev) +{ + rtw8821a_do_iqk(rtwdev); +} + +static void rtw8821a_pwr_track(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + if (!dm_info->pwr_trk_triggered) { + rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, + GENMASK(17, 16), 0x03); + dm_info->pwr_trk_triggered = true; + return; + } + + rtw88xxa_phy_pwrtrack(rtwdev, NULL, rtw8821a_do_iqk); + dm_info->pwr_trk_triggered = false; +} + +static void rtw8821a_fill_txdesc_checksum(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + u8 *txdesc) +{ + fill_txdesc_checksum_common(txdesc, 16); +} + +static void rtw8821a_coex_cfg_init(struct rtw_dev *rtwdev) +{ + u8 val8; + + /* BT report packet sample rate */ + rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5); + + val8 = BIT_STATIS_BT_EN; + if (rtwdev->efuse.share_ant) + val8 |= BIT_R_GRANTALL_WLMASK; + rtw_write8(rtwdev, REG_BT_COEX_ENH_INTR_CTRL, val8); + + /* enable BT counter statistics */ + rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x3); + + /* enable PTA */ + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); +} + +static void rtw8821a_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, + u8 
pos_type) +{ + bool share_ant = rtwdev->efuse.share_ant; + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + u32 phase = coex_dm->cur_ant_pos_type; + + if (!rtwdev->efuse.btcoex) + return; + + switch (phase) { + case COEX_SET_ANT_POWERON: + case COEX_SET_ANT_INIT: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + rtw_write8_set(rtwdev, REG_GNT_BT, BIT_PTA_SW_CTL); + + rtw_write8(rtwdev, REG_RFE_CTRL8, + share_ant ? PTA_CTRL_PIN : DPDT_CTRL_PIN); + rtw_write32_mask(rtwdev, REG_RFE_CTRL8, 0x30000000, 0x1); + break; + case COEX_SET_ANT_WONLY: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + rtw_write8_clr(rtwdev, REG_GNT_BT, BIT_PTA_SW_CTL); + + rtw_write8(rtwdev, REG_RFE_CTRL8, DPDT_CTRL_PIN); + rtw_write32_mask(rtwdev, REG_RFE_CTRL8, 0x30000000, 0x1); + break; + case COEX_SET_ANT_2G: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + rtw_write8_clr(rtwdev, REG_GNT_BT, BIT_PTA_SW_CTL); + + rtw_write8(rtwdev, REG_RFE_CTRL8, + share_ant ? PTA_CTRL_PIN : DPDT_CTRL_PIN); + rtw_write32_mask(rtwdev, REG_RFE_CTRL8, 0x30000000, 0x1); + break; + case COEX_SET_ANT_5G: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + rtw_write8_set(rtwdev, REG_GNT_BT, BIT_PTA_SW_CTL); + + rtw_write8(rtwdev, REG_RFE_CTRL8, DPDT_CTRL_PIN); + rtw_write32_mask(rtwdev, REG_RFE_CTRL8, 0x30000000, + share_ant ? 0x2 : 0x1); + break; + case COEX_SET_ANT_WOFF: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + rtw_write8_set(rtwdev, REG_GNT_BT, BIT_PTA_SW_CTL); + + rtw_write8(rtwdev, REG_RFE_CTRL8, DPDT_CTRL_PIN); + rtw_write32_mask(rtwdev, REG_RFE_CTRL8, 0x30000000, + share_ant ? 
0x2 : 0x1); + break; + default: + rtw_warn(rtwdev, "%s: not handling phase %d\n", + __func__, phase); + break; + } +} + +static void rtw8821a_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) +{ +} + +static void rtw8821a_coex_cfg_gnt_debug(struct rtw_dev *rtwdev) +{ +} + +static void rtw8821a_coex_cfg_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + + coex_rfe->ant_switch_exist = true; +} + +static void rtw8821a_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + bool share_ant = efuse->share_ant; + + if (share_ant) + return; + + if (wl_pwr == coex_dm->cur_wl_pwr_lvl) + return; + + coex_dm->cur_wl_pwr_lvl = wl_pwr; +} + +static void rtw8821a_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{ +} + +static const struct rtw_chip_ops rtw8821a_ops = { + .power_on = rtw88xxa_power_on, + .power_off = rtw8821a_power_off, + .phy_set_param = NULL, + .read_efuse = rtw88xxa_read_efuse, + .query_phy_status = rtw8821a_query_phy_status, + .set_channel = rtw88xxa_set_channel, + .mac_init = NULL, + .read_rf = rtw88xxa_phy_read_rf, + .write_rf = rtw_phy_write_rf_reg_sipi, + .set_antenna = NULL, + .set_tx_power_index = rtw88xxa_set_tx_power_index, + .cfg_ldo25 = rtw8821a_cfg_ldo25, + .efuse_grant = rtw88xxa_efuse_grant, + .false_alarm_statistics = rtw88xxa_false_alarm_statistics, + .phy_calibration = rtw8821a_phy_calibration, + .cck_pd_set = rtw88xxa_phy_cck_pd_set, + .pwr_track = rtw8821a_pwr_track, + .config_bfee = NULL, + .set_gid_table = NULL, + .cfg_csi_rate = NULL, + .fill_txdesc_checksum = rtw8821a_fill_txdesc_checksum, + .coex_set_init = rtw8821a_coex_cfg_init, + .coex_set_ant_switch = rtw8821a_coex_cfg_ant_switch, + .coex_set_gnt_fix = rtw8821a_coex_cfg_gnt_fix, + .coex_set_gnt_debug = rtw8821a_coex_cfg_gnt_debug, + .coex_set_rfe_type = rtw8821a_coex_cfg_rfe_type, + .coex_set_wl_tx_power = rtw8821a_coex_cfg_wl_tx_power, + .coex_set_wl_rx_gain = rtw8821a_coex_cfg_wl_rx_gain, +}; + +static const struct rtw_page_table page_table_8821a[] = { + /* hq_num, nq_num, lq_num, exq_num, gapq_num */ + {0, 0, 0, 0, 0}, /* SDIO */ + {0, 0, 0, 0, 0}, /* PCI */ + {8, 0, 0, 0, 1}, /* 2 bulk out endpoints */ + {8, 0, 8, 0, 1}, /* 3 bulk out endpoints */ + {8, 0, 8, 4, 1}, /* 4 bulk out endpoints */ +}; + +static const struct rtw_rqpn rqpn_table_8821a[] = { + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, + + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, + + {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH, + RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + + {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, +}; + +static const struct rtw_prioq_addrs prioq_addrs_8821a = { + .prio[RTW_DMA_MAPPING_EXTRA] = { + .rsvd = REG_RQPN_NPQ + 2, .avail = REG_RQPN_NPQ + 3, + }, + .prio[RTW_DMA_MAPPING_LOW] = { + .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1, + }, + .prio[RTW_DMA_MAPPING_NORMAL] = { + .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1, + }, + 
.prio[RTW_DMA_MAPPING_HIGH] = { + .rsvd = REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2, + }, + .wsize = false, +}; + +static const struct rtw_hw_reg rtw8821a_dig[] = { + [0] = { .addr = REG_RXIGI_A, .mask = 0x7f }, +}; + +static const struct rtw_rfe_def rtw8821a_rfe_defs[] = { + [0] = { .phy_pg_tbl = &rtw8821a_bb_pg_tbl, + .txpwr_lmt_tbl = &rtw8821a_txpwr_lmt_tbl, + .pwr_track_tbl = &rtw8821a_rtw_pwr_track_tbl, }, +}; + +/* TODO */ +/* rssi in percentage % (dbm = % - 100) */ +static const u8 wl_rssi_step_8821a[] = {101, 45, 101, 40}; +static const u8 bt_rssi_step_8821a[] = {101, 101, 101, 101}; + +/* table_sant_8821a, table_nsant_8821a, tdma_sant_8821a, and tdma_nsant_8821a + * are copied from rtw8821c.c because the 8821au driver's tables are not + * compatible with the coex code in rtw88. + * + * tdma case 112 (A2DP) byte 0 had to be modified from 0x61 to 0x51, + * otherwise the firmware gets confused after pausing the music: + * rtw_8821au 1-2:1.2: [BTCoex], Bt_info[1], len=7, data=[81 00 0a 01 00 00] + * - 81 means PAN (personal area network) when it should be 4x (A2DP) + * The music is not smooth with the PAN algorithm. + */ + +/* Shared-Antenna Coex Table */ +static const struct coex_table_para table_sant_8821a[] = { + {0x55555555, 0x55555555}, /* case-0 */ + {0x55555555, 0x55555555}, + {0x66555555, 0x66555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xfafafafa, 0xfafafafa}, /* case-5 */ + {0x6a5a5555, 0xaaaaaaaa}, + {0x6a5a56aa, 0x6a5a56aa}, + {0x6a5a5a5a, 0x6a5a5a5a}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-10 */ + {0x66555555, 0xaaaaaaaa}, + {0x66555555, 0x6a5a5aaa}, + {0x66555555, 0x6aaa6aaa}, + {0x66555555, 0x6a5a5aaa}, + {0x66555555, 0xaaaaaaaa}, /* case-15 */ + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x6afa5afa}, + {0xaaffffaa, 0xfafafafa}, + {0xaa5555aa, 0x5a5a5a5a}, + {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ + {0xaa5555aa, 0xaaaaaaaa}, + {0xffffffff, 0x55555555}, + {0xffffffff, 0x5a5a5a5a}, + {0xffffffff, 0x5a5a5a5a}, + {0xffffffff, 0x5a5a5aaa}, /* case-25 */ + {0x55555555, 0x5a5a5a5a}, + {0x55555555, 0xaaaaaaaa}, + {0x66555555, 0x6a5a6a5a}, + {0x66556655, 0x66556655}, + {0x66556aaa, 0x6a5a6aaa}, /* case-30 */ + {0xffffffff, 0x5aaa5aaa}, + {0x56555555, 0x5a5a5aaa} +}; + +/* Non-Shared-Antenna Coex Table */ +static const struct coex_table_para table_nsant_8821a[] = { + {0xffffffff, 0xffffffff}, /* case-100 */ + {0xffff55ff, 0xfafafafa}, + {0x66555555, 0x66555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xffffffff, 0xffffffff}, /* case-105 */ + {0x5afa5afa, 0x5afa5afa}, + {0x55555555, 0xfafafafa}, + {0x66555555, 0xfafafafa}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-110 */ + {0x66555555, 0xaaaaaaaa}, + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x5afa5afa}, + {0xffff55ff, 0xaaaaaaaa}, + {0xffff55ff, 0xffff55ff}, /* case-115 */ + {0xaaffffaa, 0x5afa5afa}, + {0xaaffffaa, 0xaaaaaaaa}, + {0xffffffff, 0xfafafafa}, + {0xffff55ff, 0xfafafafa}, + {0xffffffff, 0xaaaaaaaa}, /* case-120 */ + {0xffff55ff, 0x5afa5afa}, + {0xffff55ff, 0x5afa5afa}, + {0x55ff55ff, 0x55ff55ff} +}; + +/* Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_sant_8821a[] = { + { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */ + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, + { {0x61, 0x35, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, /* case-5 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x35, 0x03, 0x11, 0x10} }, + { {0x61, 
0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */ + { {0x61, 0x08, 0x03, 0x11, 0x15} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */ + { {0x51, 0x45, 0x03, 0x10, 0x50} }, + { {0x51, 0x3a, 0x03, 0x11, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x21, 0x03, 0x10, 0x50} }, + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */ + { {0x51, 0x4a, 0x03, 0x10, 0x50} }, + { {0x51, 0x08, 0x03, 0x30, 0x54} }, + { {0x55, 0x08, 0x03, 0x10, 0x54} }, + { {0x65, 0x10, 0x03, 0x11, 0x10} }, + { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ + { {0x51, 0x21, 0x03, 0x10, 0x50} }, + { {0x61, 0x08, 0x03, 0x11, 0x11} } +}; + +/* Non-Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_nsant_8821a[] = { + { {0x00, 0x00, 0x00, 0x40, 0x00} }, /* case-100 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, + { {0x61, 0x25, 0x03, 0x11, 0x11} }, + { {0x61, 0x35, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */ + { {0x61, 0x10, 0x03, 0x11, 0x11} }, + { {0x51, 0x08, 0x03, 0x10, 0x14} }, /* a2dp high rssi */ + { {0x51, 0x08, 0x03, 0x10, 0x54} }, /* a2dp not high rssi */ + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */ + { {0x51, 0x45, 0x03, 0x10, 0x50} }, + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x21, 0x03, 0x10, 0x50} }, + { {0x51, 0x21, 0x03, 0x10, 0x50} }, /* case-120 */ + { {0x51, 0x10, 0x03, 0x10, 0x50} } +}; + +/* TODO */ +static const struct coex_rf_para rf_para_tx_8821a[] = { + {0, 0, false, 7}, /* for normal */ + {0, 20, false, 7}, /* for WL-CPT */ + {8, 17, true, 4}, + {7, 18, true, 4}, + {6, 19, true, 4}, + {5, 20, true, 4} +}; + +static const struct coex_rf_para rf_para_rx_8821a[] = { + {0, 0, false, 7}, /* for normal */ + {0, 20, false, 7}, /* for WL-CPT */ + {3, 24, true, 5}, + {2, 26, true, 5}, + {1, 27, true, 5}, + {0, 28, true, 5} +}; + +static_assert(ARRAY_SIZE(rf_para_tx_8821a) == ARRAY_SIZE(rf_para_rx_8821a)); + +static const struct coex_5g_afh_map afh_5g_8821a[] = { {0, 0, 0} }; + +static const struct rtw_reg_domain coex_info_hw_regs_8821a[] = { + {0xCB0, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0xCB4, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0xCBA, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0, 0, RTW_REG_DOMAIN_NL}, + {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16}, + {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8}, + {0x454, MASKLWORD, RTW_REG_DOMAIN_MAC16}, + {0, 0, RTW_REG_DOMAIN_NL}, + {0x4c, BIT(24) | BIT(23), RTW_REG_DOMAIN_MAC32}, + {0x64, BIT(0), RTW_REG_DOMAIN_MAC8}, + {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8}, + {0x40, BIT(5), RTW_REG_DOMAIN_MAC8}, + {0x1, RFREG_MASK, RTW_REG_DOMAIN_RF_A}, + {0, 0, RTW_REG_DOMAIN_NL}, + {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0x953, BIT(1), RTW_REG_DOMAIN_MAC8}, + {0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0x60A, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, +}; + +const struct rtw_chip_info rtw8821a_hw_spec = { + .ops = &rtw8821a_ops, + .id = RTW_CHIP_TYPE_8821A, + .fw_name = 
"rtw88/rtw8821a_fw.bin", + .wlan_cpu = RTW_WCPU_11N, + .tx_pkt_desc_sz = 40, + .tx_buf_desc_sz = 16, + .rx_pkt_desc_sz = 24, + .rx_buf_desc_sz = 8, + .phy_efuse_size = 512, + .log_efuse_size = 512, + .ptct_efuse_size = 96 + 1, /* TODO or just 18? */ + .txff_size = 65536, + .rxff_size = 16128, + .rsvd_drv_pg_num = 8, + .txgi_factor = 1, + .is_pwr_by_rate_dec = true, + .max_power_index = 0x3f, + .csi_buf_pg_num = 0, + .band = RTW_BAND_2G | RTW_BAND_5G, + .page_size = 256, + .dig_min = 0x20, + .ht_supported = true, + .vht_supported = true, + .lps_deep_mode_supported = 0, + .sys_func_en = 0xFD, + .pwr_on_seq = card_enable_flow_8821a, + .pwr_off_seq = card_disable_flow_8821a, + .page_table = page_table_8821a, + .rqpn_table = rqpn_table_8821a, + .prioq_addrs = &prioq_addrs_8821a, + .intf_table = NULL, + .dig = rtw8821a_dig, + .rf_sipi_addr = {REG_LSSI_WRITE_A, REG_LSSI_WRITE_B}, + .ltecoex_addr = NULL, + .mac_tbl = &rtw8821a_mac_tbl, + .agc_tbl = &rtw8821a_agc_tbl, + .bb_tbl = &rtw8821a_bb_tbl, + .rf_tbl = {&rtw8821a_rf_a_tbl}, + .rfe_defs = rtw8821a_rfe_defs, + .rfe_defs_size = ARRAY_SIZE(rtw8821a_rfe_defs), + .rx_ldpc = false, + .hw_feature_report = false, + .c2h_ra_report_size = 4, + .old_datarate_fb_limit = true, + .usb_tx_agg_desc_num = 6, + .iqk_threshold = 8, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, + .max_scan_ie_len = IEEE80211_MAX_DATA_LEN, + + .coex_para_ver = 20190509, /* glcoex_ver_date_8821a_1ant */ + .bt_desired_ver = 0x62, /* But for 2 ant it's 0x5c */ + .scbd_support = false, + .new_scbd10_def = false, + .ble_hid_profile_support = false, + .wl_mimo_ps_support = false, + .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, + .bt_rssi_type = COEX_BTRSSI_RATIO, + .ant_isolation = 10, + .rssi_tolerance = 2, + .wl_rssi_step = wl_rssi_step_8821a, + .bt_rssi_step = bt_rssi_step_8821a, + .table_sant_num = ARRAY_SIZE(table_sant_8821a), + .table_sant = table_sant_8821a, + .table_nsant_num = ARRAY_SIZE(table_nsant_8821a), + .table_nsant = table_nsant_8821a, + .tdma_sant_num = ARRAY_SIZE(tdma_sant_8821a), + .tdma_sant = tdma_sant_8821a, + .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8821a), + .tdma_nsant = tdma_nsant_8821a, + .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8821a), + .wl_rf_para_tx = rf_para_tx_8821a, + .wl_rf_para_rx = rf_para_rx_8821a, + .bt_afh_span_bw20 = 0x20, + .bt_afh_span_bw40 = 0x30, + .afh_5g_num = ARRAY_SIZE(afh_5g_8821a), + .afh_5g = afh_5g_8821a, + + .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8821a), + .coex_info_hw_regs = coex_info_hw_regs_8821a, +}; +EXPORT_SYMBOL(rtw8821a_hw_spec); + +MODULE_FIRMWARE("rtw88/rtw8821a_fw.bin"); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8821a/8811a driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a.h b/drivers/net/wireless/realtek/rtw88/rtw8821a.h new file mode 100644 index 000000000000..1b2e548f7234 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8821a.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW8821A_H__ +#define __RTW8821A_H__ + +extern const struct rtw_chip_info rtw8821a_hw_spec; + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a_table.c b/drivers/net/wireless/realtek/rtw88/rtw8821a_table.c new file mode 100644 index 000000000000..c8fd8e331f69 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8821a_table.c @@ -0,0 +1,2350 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek 
Corporation + */ + +#include "main.h" +#include "phy.h" +#include "rtw8821a_table.h" + +static const u32 rtw8821a_mac[] = { + 0x421, 0x0000000F, + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x00000000, + 0x446, 0x00000000, + 0x447, 0x00000000, + 0x448, 0x00000000, + 0x449, 0x000000F0, + 0x44A, 0x0000000F, + 0x44B, 0x0000003E, + 0x44C, 0x00000010, + 0x44D, 0x00000000, + 0x44E, 0x00000000, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x000000F0, + 0x452, 0x0000000F, + 0x453, 0x00000000, + 0x456, 0x0000005E, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x4C8, 0x0000003F, + 0x4C9, 0x000000FF, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x525, 0x0000004F, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x605, 0x00000030, + 0x607, 0x00000007, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x718, 0x00000040, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8821a_mac, rtw_phy_cfg_mac); + +static const u32 rtw8821a_agc[] = { + 0x81C, 0xBF000001, + 0x81C, 0xBF020001, + 0x81C, 0xBF040001, + 0x81C, 0xBF060001, + 0x81C, 0xBE080001, + 0x81C, 0xBD0A0001, + 0x81C, 0xBC0C0001, + 0x81C, 0xBA0E0001, + 0x81C, 0xB9100001, + 0x81C, 0xB8120001, + 0x81C, 0xB7140001, + 0x81C, 0xB6160001, + 0x81C, 0xB5180001, + 0x81C, 0xB41A0001, + 0x81C, 0xB31C0001, + 0x81C, 0xB21E0001, + 0x81C, 0xB1200001, + 0x81C, 0xB0220001, + 0x81C, 0xAF240001, + 0x81C, 0xAE260001, + 0x81C, 0xAD280001, + 0x81C, 0xAC2A0001, + 0x81C, 0xAB2C0001, + 0x81C, 0xAA2E0001, + 0x81C, 0xA9300001, + 0x81C, 0xA8320001, + 0x81C, 0xA7340001, + 0x81C, 0xA6360001, + 0x81C, 0xA5380001, + 0x81C, 0xA43A0001, + 0x81C, 0x683C0001, + 0x81C, 0x673E0001, + 0x81C, 0x66400001, + 0x81C, 0x65420001, + 0x81C, 0x64440001, + 0x81C, 0x63460001, + 0x81C, 0x62480001, + 0x81C, 0x614A0001, + 0x81C, 0x474C0001, + 0x81C, 0x464E0001, + 0x81C, 0x45500001, + 0x81C, 0x44520001, + 0x81C, 0x43540001, + 0x81C, 0x42560001, + 0x81C, 0x41580001, + 0x81C, 0x285A0001, + 0x81C, 0x275C0001, + 0x81C, 0x265E0001, + 0x81C, 0x25600001, + 0x81C, 0x24620001, + 0x81C, 0x0A640001, + 0x81C, 0x09660001, + 0x81C, 0x08680001, + 0x81C, 0x076A0001, + 0x81C, 0x066C0001, + 0x81C, 0x056E0001, + 0x81C, 0x04700001, + 0x81C, 0x03720001, + 0x81C, 0x02740001, + 0x81C, 0x01760001, + 0x81C, 
0x01780001, + 0x81C, 0x017A0001, + 0x81C, 0x017C0001, + 0x81C, 0x017E0001, + 0x8000020c, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000101, + 0x81C, 0xFA020101, + 0x81C, 0xF9040101, + 0x81C, 0xF8060101, + 0x81C, 0xF7080101, + 0x81C, 0xF60A0101, + 0x81C, 0xF50C0101, + 0x81C, 0xF40E0101, + 0x81C, 0xF3100101, + 0x81C, 0xF2120101, + 0x81C, 0xF1140101, + 0x81C, 0xF0160101, + 0x81C, 0xEF180101, + 0x81C, 0xEE1A0101, + 0x81C, 0xED1C0101, + 0x81C, 0xEC1E0101, + 0x81C, 0xEB200101, + 0x81C, 0xEA220101, + 0x81C, 0xE9240101, + 0x81C, 0xE8260101, + 0x81C, 0xE7280101, + 0x81C, 0xE62A0101, + 0x81C, 0xE52C0101, + 0x81C, 0xE42E0101, + 0x81C, 0xE3300101, + 0x81C, 0xA5320101, + 0x81C, 0xA4340101, + 0x81C, 0xA3360101, + 0x81C, 0x87380101, + 0x81C, 0x863A0101, + 0x81C, 0x853C0101, + 0x81C, 0x843E0101, + 0x81C, 0x69400101, + 0x81C, 0x68420101, + 0x81C, 0x67440101, + 0x81C, 0x66460101, + 0x81C, 0x49480101, + 0x81C, 0x484A0101, + 0x81C, 0x474C0101, + 0x81C, 0x2A4E0101, + 0x81C, 0x29500101, + 0x81C, 0x28520101, + 0x81C, 0x27540101, + 0x81C, 0x26560101, + 0x81C, 0x25580101, + 0x81C, 0x245A0101, + 0x81C, 0x235C0101, + 0x81C, 0x055E0101, + 0x81C, 0x04600101, + 0x81C, 0x03620101, + 0x81C, 0x02640101, + 0x81C, 0x01660101, + 0x81C, 0x01680101, + 0x81C, 0x016A0101, + 0x81C, 0x016C0101, + 0x81C, 0x016E0101, + 0x81C, 0x01700101, + 0x81C, 0x01720101, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000101, + 0x81C, 0xFA020101, + 0x81C, 0xF9040101, + 0x81C, 0xF8060101, + 0x81C, 0xF7080101, + 0x81C, 0xF60A0101, + 0x81C, 0xF50C0101, + 0x81C, 0xF40E0101, + 0x81C, 0xF3100101, + 0x81C, 0xF2120101, + 0x81C, 0xF1140101, + 0x81C, 0xF0160101, + 0x81C, 0xEF180101, + 0x81C, 0xEE1A0101, + 0x81C, 0xED1C0101, + 0x81C, 0xEC1E0101, + 0x81C, 0xEB200101, + 0x81C, 0xEA220101, + 0x81C, 0xE9240101, + 0x81C, 0xE8260101, + 0x81C, 0xE7280101, + 0x81C, 0xE62A0101, + 0x81C, 0xE52C0101, + 0x81C, 0xE42E0101, + 0x81C, 0xE3300101, + 0x81C, 0xA5320101, + 0x81C, 0xA4340101, + 0x81C, 0xA3360101, + 0x81C, 0x87380101, + 0x81C, 0x863A0101, + 0x81C, 0x853C0101, + 0x81C, 0x843E0101, + 0x81C, 0x69400101, + 0x81C, 0x68420101, + 0x81C, 0x67440101, + 0x81C, 0x66460101, + 0x81C, 0x49480101, + 0x81C, 0x484A0101, + 0x81C, 0x474C0101, + 0x81C, 0x2A4E0101, + 0x81C, 0x29500101, + 0x81C, 0x28520101, + 0x81C, 0x27540101, + 0x81C, 0x26560101, + 0x81C, 0x25580101, + 0x81C, 0x245A0101, + 0x81C, 0x235C0101, + 0x81C, 0x055E0101, + 0x81C, 0x04600101, + 0x81C, 0x03620101, + 0x81C, 0x02640101, + 0x81C, 0x01660101, + 0x81C, 0x01680101, + 0x81C, 0x016A0101, + 0x81C, 0x016C0101, + 0x81C, 0x016E0101, + 0x81C, 0x01700101, + 0x81C, 0x01720101, + 0xA0000000, 0x00000000, + 0x81C, 0xFF000101, + 0x81C, 0xFF020101, + 0x81C, 0xFE040101, + 0x81C, 0xFD060101, + 0x81C, 0xFC080101, + 0x81C, 0xFD0A0101, + 0x81C, 0xFC0C0101, + 0x81C, 0xFB0E0101, + 0x81C, 0xFA100101, + 0x81C, 0xF9120101, + 0x81C, 0xF8140101, + 0x81C, 0xF7160101, + 0x81C, 0xF6180101, + 0x81C, 0xF51A0101, + 0x81C, 0xF41C0101, + 0x81C, 0xF31E0101, + 0x81C, 0xF2200101, + 0x81C, 0xF1220101, + 0x81C, 0xF0240101, + 0x81C, 0xEF260101, + 0x81C, 0xEE280101, + 0x81C, 0xED2A0101, + 0x81C, 0xEC2C0101, + 0x81C, 0xEB2E0101, + 0x81C, 0xEA300101, + 0x81C, 0xE9320101, + 0x81C, 0xE8340101, + 0x81C, 0xE7360101, + 0x81C, 0xE6380101, + 0x81C, 0xE53A0101, + 0x81C, 0xE43C0101, + 0x81C, 0xE33E0101, + 0x81C, 0xA5400101, + 0x81C, 0xA4420101, + 0x81C, 0xA3440101, + 0x81C, 0x87460101, + 0x81C, 0x86480101, + 0x81C, 0x854A0101, + 0x81C, 0x844C0101, + 0x81C, 0x694E0101, + 0x81C, 0x68500101, + 0x81C, 0x67520101, + 0x81C, 0x66540101, + 0x81C, 
0x49560101, + 0x81C, 0x48580101, + 0x81C, 0x475A0101, + 0x81C, 0x2A5C0101, + 0x81C, 0x295E0101, + 0x81C, 0x28600101, + 0x81C, 0x27620101, + 0x81C, 0x26640101, + 0x81C, 0x25660101, + 0x81C, 0x24680101, + 0x81C, 0x236A0101, + 0x81C, 0x056C0101, + 0x81C, 0x046E0101, + 0x81C, 0x03700101, + 0x81C, 0x02720101, + 0xB0000000, 0x00000000, + 0x81C, 0x01740101, + 0x81C, 0x01760101, + 0x81C, 0x01780101, + 0x81C, 0x017A0101, + 0x81C, 0x017C0101, + 0x81C, 0x017E0101, + 0xC50, 0x00000022, + 0xC50, 0x00000020, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8821a_agc, rtw_phy_cfg_agc); + +static const u32 rtw8821a_bb[] = { + 0x800, 0x0020D090, + 0x804, 0x080112E0, + 0x808, 0x0E028211, + 0x80C, 0x92131111, + 0x810, 0x20101261, + 0x814, 0x020C3D10, + 0x818, 0x03A00385, + 0x820, 0x00000000, + 0x824, 0x00030FE0, + 0x828, 0x00000000, + 0x82C, 0x002081DD, + 0x830, 0x2AAAEEC8, + 0x834, 0x0037A706, + 0x838, 0x06489B44, + 0x83C, 0x0000095B, + 0x840, 0xC0000001, + 0x844, 0x40003CDE, + 0x848, 0x62103F8B, + 0x84C, 0x6CFDFFB8, + 0x850, 0x28874706, + 0x854, 0x0001520C, + 0x858, 0x8060E000, + 0x85C, 0x74210168, + 0x860, 0x6929C321, + 0x864, 0x79727432, + 0x868, 0x8CA7A314, + 0x86C, 0x888C2878, + 0x870, 0x08888888, + 0x874, 0x31612C2E, + 0x878, 0x00000152, + 0x87C, 0x000FD000, + 0x8A0, 0x00000013, + 0x8A4, 0x7F7F7F7F, + 0x8A8, 0xA2000338, + 0x8AC, 0x0FF0FA0A, + 0x8B4, 0x000FC080, + 0x8B8, 0x6C10D7FF, + 0x8BC, 0x0CA52090, + 0x8C0, 0x1BF00020, + 0x8C4, 0x00000000, + 0x8C8, 0x00013169, + 0x8CC, 0x08248492, + 0x8D4, 0x940008A0, + 0x8D8, 0x290B5612, + 0x8F8, 0x400002C0, + 0x8FC, 0x00000000, + 0x900, 0x00000700, + 0x90C, 0x00000000, + 0x910, 0x0000FC00, + 0x914, 0x00000404, + 0x918, 0x1C1028C0, + 0x91C, 0x64B11A1C, + 0x920, 0xE0767233, + 0x924, 0x055AA500, + 0x928, 0x00000004, + 0x92C, 0xFFFE0000, + 0x930, 0xFFFFFFFE, + 0x934, 0x001FFFFF, + 0x960, 0x00000000, + 0x964, 0x00000000, + 0x968, 0x00000000, + 0x96C, 0x00000000, + 0x970, 0x801FFFFF, + 0x974, 0x000003FF, + 0x978, 0x00000000, + 0x97C, 0x00000000, + 0x980, 0x00000000, + 0x984, 0x00000000, + 0x988, 0x00000000, + 0x990, 0x27100000, + 0x994, 0xFFFF0100, + 0x998, 0xFFFFFF5C, + 0x99C, 0xFFFFFFFF, + 0x9A0, 0x000000FF, + 0x9A4, 0x00480080, + 0x9A8, 0x00000000, + 0x9AC, 0x00000000, + 0x9B0, 0x81081008, + 0x9B4, 0x01081008, + 0x9B8, 0x01081008, + 0x9BC, 0x01081008, + 0x9D0, 0x00000000, + 0x9D4, 0x00000000, + 0x9D8, 0x00000000, + 0x9DC, 0x00000000, + 0x9E0, 0x00005D00, + 0x9E4, 0x00000003, + 0x9E8, 0x00000001, + 0xA00, 0x00D047C8, + 0xA04, 0x01FF800C, + 0xA08, 0x8C8A8300, + 0xA0C, 0x2E68000F, + 0xA10, 0x9500BB78, + 0xA14, 0x11144028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0000, + 0xA24, 0x090E1317, + 0xA28, 0x00000204, + 0xA2C, 0x00900000, + 0xA70, 0x101FFF00, + 0xA74, 0x00000008, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x21805490, + 0xA84, 0x001F0000, + 0XB00, 0x03100040, + 0XB04, 0x0000B000, + 0XB08, 0xAE0201EB, + 0XB0C, 0x01003207, + 0XB10, 0x00009807, + 0XB14, 0x01000000, + 0XB18, 0x00000002, + 0XB1C, 0x00000002, + 0XB20, 0x0000001F, + 0XB24, 0x03020100, + 0XB28, 0x07060504, + 0XB2C, 0x0B0A0908, + 0XB30, 0x0F0E0D0C, + 0XB34, 0x13121110, + 0XB38, 0x17161514, + 0XB3C, 0x0000003A, + 0XB40, 0x00000000, + 0XB44, 0x00000000, + 0XB48, 0x13000032, + 0XB4C, 0x48080000, + 0XB50, 0x00000000, + 0XB54, 0x00000000, + 0XB58, 0x00000000, + 0XB5C, 0x00000000, + 0xC00, 0x00000007, + 0xC04, 0x00042020, + 0xC08, 0x80410231, + 0xC0C, 0x00000000, + 0xC10, 0x00000100, + 0xC14, 0x01000000, + 0xC1C, 0x40000003, + 0xC20, 0x2C2C2C2C, + 0xC24, 0x30303030, + 0xC28, 0x30303030, + 0xC2C, 
0x2C2C2C2C, + 0xC30, 0x2C2C2C2C, + 0xC34, 0x2C2C2C2C, + 0xC38, 0x2C2C2C2C, + 0xC3C, 0x2A2A2A2A, + 0xC40, 0x2A2A2A2A, + 0xC44, 0x2A2A2A2A, + 0xC48, 0x2A2A2A2A, + 0xC4C, 0x2A2A2A2A, + 0xC50, 0x00000020, + 0xC54, 0x001C1208, + 0xC58, 0x30000C1C, + 0xC5C, 0x00000058, + 0xC60, 0x34344443, + 0xC64, 0x07003333, + 0xC68, 0x19791979, + 0xC6C, 0x19791979, + 0xC70, 0x19791979, + 0xC74, 0x19791979, + 0xC78, 0x19791979, + 0xC7C, 0x19791979, + 0xC80, 0x19791979, + 0xC84, 0x19791979, + 0xC94, 0x0100005C, + 0xC98, 0x00000000, + 0xC9C, 0x00000000, + 0xCA0, 0x00000029, + 0xCA4, 0x08040201, + 0xCA8, 0x80402010, + 0xCB0, 0x77775747, + 0xCB4, 0x10000077, + 0xCB8, 0x00508240, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8821a_bb, rtw_phy_cfg_bb); + +static const struct rtw_phy_pg_cfg_pair rtw8821a_bb_pg[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36363838, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363838, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, }, + { 0, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343636, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343636, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343636, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, }, + { 1, 0, 0, 0x00000c44, 0x0000ffff, 0x00002022, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8821a_bb_pg); + +static const u32 rtw8821a_rf_a[] = { + 0x018, 0x0001712A, + 0x056, 0x00051CF2, + 0x066, 0x00040000, + 0x000, 0x00010000, + 0x01E, 0x00080000, + 0x082, 0x00000830, + 0x083, 0x00021800, + 0x084, 0x00028000, + 0x085, 0x00048000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x0009483A, + 0xA0000000, 0x00000000, + 0x086, 0x00094838, + 0xB0000000, 0x00000000, + 0x087, 0x00044980, + 0x088, 0x00048000, + 0x089, 0x0000D480, + 0x08A, 0x00042240, + 0x08B, 0x000F0380, + 0x08C, 0x00090000, + 0x08D, 0x00022852, + 0x08E, 0x00065540, + 0x08F, 0x00088001, + 0x0EF, 0x00020000, + 0x03E, 0x00000380, + 0x03F, 0x00090018, + 0x03E, 0x00020380, + 0x03F, 0x000A0018, + 0x03E, 0x00040308, + 0x03F, 0x000A0018, + 0x03E, 0x00060018, + 0x03F, 0x000A0018, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x089, 0x00000080, + 0x08B, 0x00080180, + 0x0EF, 0x00001000, + 0x03A, 0x00000244, + 0x03B, 0x00038027, + 0x03C, 0x00082000, + 0x03A, 0x00000244, + 0x03B, 0x00030113, + 0x03C, 0x00082000, + 0x03A, 0x0000014C, + 0x03B, 0x00028027, + 0x03C, 0x00082000, + 0x03A, 0x000000CC, + 0x03B, 0x00027027, + 0x03C, 0x00042000, + 0x03A, 0x0000014C, + 0x03B, 0x0001F913, + 0x03C, 0x00042000, + 0x03A, 0x0000010C, + 0x03B, 0x00017F10, + 0x03C, 0x00012000, + 0x03A, 0x000000D0, + 0x03B, 0x00008027, + 0x03C, 0x000CA000, + 0x03A, 0x00000244, + 0x03B, 0x00078027, + 0x03C, 0x00082000, + 0x03A, 0x00000244, + 0x03B, 0x00070113, + 0x03C, 0x00082000, + 0x03A, 0x0000014C, + 0x03B, 0x00068027, + 0x03C, 0x00082000, + 0x03A, 0x000000CC, + 0x03B, 0x00067027, + 0x03C, 0x00042000, + 0x03A, 0x0000014C, + 0x03B, 0x0005F913, + 0x03C, 0x00042000, + 0x03A, 0x0000010C, + 0x03B, 0x00057F10, + 0x03C, 0x00012000, + 0x03A, 0x000000D0, + 0x03B, 0x00048027, + 0x03C, 0x000CA000, + 0x03A, 0x00000244, + 0x03B, 0x000B8027, + 0x03C, 0x00082000, + 0x03A, 0x00000244, + 0x03B, 0x000B0113, + 0x03C, 0x00082000, + 0x03A, 0x0000014C, + 0x03B, 0x000A8027, + 0x03C, 0x00082000, + 0x03A, 
0x000000CC, + 0x03B, 0x000A7027, + 0x03C, 0x00042000, + 0x03A, 0x0000014C, + 0x03B, 0x0009F913, + 0x03C, 0x00042000, + 0x03A, 0x0000010C, + 0x03B, 0x00097F10, + 0x03C, 0x00012000, + 0x03A, 0x000000D0, + 0x03B, 0x00088027, + 0x03C, 0x000CA000, + 0x0EF, 0x00000000, + 0x0EF, 0x00001100, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0F3, + 0x034, 0x000490B1, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A0F3, + 0x034, 0x000490B1, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF5, + 0x034, 0x00049DF2, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004ADF3, + 0x034, 0x00049DF0, + 0xA0000000, 0x00000000, + 0x034, 0x0004ADF7, + 0x034, 0x00049DF3, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000480AE, + 0x034, 0x000470AB, + 0x034, 0x0004608B, + 0x034, 0x00045069, + 0x034, 0x00044048, + 0x034, 0x00043045, + 0x034, 0x00042026, + 0x034, 0x00041023, + 0x034, 0x00040002, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000480AE, + 0x034, 0x000470AB, + 0x034, 0x0004608B, + 0x034, 0x00045069, + 0x034, 0x00044048, + 0x034, 0x00043045, + 0x034, 0x00042026, + 0x034, 0x00041023, + 0x034, 0x00040002, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00048DED, + 0x034, 0x00047DEA, + 0x034, 0x00046DE7, + 0x034, 0x00045CE9, + 0x034, 0x00044CE6, + 0x034, 0x000438C6, + 0x034, 0x00042886, + 0x034, 0x00041486, + 0x034, 0x00040447, + 0xA0000000, 0x00000000, + 0x034, 0x00048DEF, + 0x034, 0x00047DEC, + 0x034, 0x00046DE9, + 0x034, 0x00045CCB, + 0x034, 0x0004488D, + 0x034, 0x0004348D, + 0x034, 0x0004248A, + 0x034, 0x0004108D, + 0x034, 0x0004008A, + 0xB0000000, 0x00000000, + 0x80000210, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002ADF4, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0F3, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A0F3, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002ADF4, + 0xA0000000, 0x00000000, + 0x034, 0x0002ADF7, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF4, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF4, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF1, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000290F0, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000290F0, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF1, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00029DF4, + 0xA0000000, 0x00000000, + 0x034, 0x00029DF2, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 
0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000280AF, + 0x034, 0x000270AC, + 0x034, 0x0002608B, + 0x034, 0x00025069, + 0x034, 0x00024048, + 0x034, 0x00023045, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020002, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000280AF, + 0x034, 0x000270AC, + 0x034, 0x0002608B, + 0x034, 0x00025069, + 0x034, 0x00024048, + 0x034, 0x00023045, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020002, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00028DF1, + 0x034, 0x00027DEE, + 0x034, 0x00026DEB, + 0x034, 0x00025CEC, + 0x034, 0x00024CE9, + 0x034, 0x000238CA, + 0x034, 0x00022889, + 0x034, 0x00021489, + 0x034, 0x0002044A, + 0xA0000000, 0x00000000, + 0x034, 0x00028DEE, + 0x034, 0x00027DEB, + 0x034, 0x00026CCD, + 0x034, 0x00025CCA, + 0x034, 0x0002488C, + 0x034, 0x0002384C, + 0x034, 0x00022849, + 0x034, 0x00021449, + 0x034, 0x0002004D, + 0xB0000000, 0x00000000, + 0x8000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D7, + 0x034, 0x000090D3, + 0x034, 0x000080B1, + 0x034, 0x000070AE, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D7, + 0x034, 0x000090D3, + 0x034, 0x000080B1, + 0x034, 0x000070AE, + 0xA0000000, 0x00000000, + 0x034, 0x0000ADF7, + 0x034, 0x00009DF4, + 0x034, 0x00008DF1, + 0x034, 0x00007DEE, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000608D, + 0x034, 0x0000506B, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x00002044, + 0x034, 0x00001025, + 0x034, 0x00000004, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000608D, + 0x034, 0x0000506B, + 0x034, 0x0000404A, + 0x034, 0x00003047, + 0x034, 0x00002044, + 0x034, 0x00001025, + 0x034, 0x00000004, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x00006DEB, + 0x034, 0x00005CEC, + 0x034, 0x00004CE9, + 0x034, 0x000038CA, + 0x034, 0x00002889, + 0x034, 0x00001489, + 0x034, 0x0000044A, + 0xA0000000, 0x00000000, + 0x034, 0x00006DCD, + 0x034, 0x00005CCD, + 0x034, 0x00004CCA, + 0x034, 0x0000388C, + 0x034, 0x00002888, + 0x034, 0x00001488, + 0x034, 0x00000486, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000128, + 0x035, 0x00008128, + 0x035, 0x00010128, + 0x035, 0x000201C8, + 0x035, 
0x000281C8, + 0x035, 0x000301C8, + 0x035, 0x000401C8, + 0x035, 0x000481C8, + 0x035, 0x000501C8, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000145, + 0x035, 0x00008145, + 0x035, 0x00010145, + 0x035, 0x00020196, + 0x035, 0x00028196, + 0x035, 0x00030196, + 0x035, 0x000401C7, + 0x035, 0x000481C7, + 0x035, 0x000501C7, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000128, + 0x035, 0x00008128, + 0x035, 0x00010128, + 0x035, 0x000201C8, + 0x035, 0x000281C8, + 0x035, 0x000301C8, + 0x035, 0x000401C8, + 0x035, 0x000481C8, + 0x035, 0x000501C8, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x00000187, + 0x035, 0x00008187, + 0x035, 0x00010187, + 0x035, 0x00020188, + 0x035, 0x00028188, + 0x035, 0x00030188, + 0x035, 0x00040188, + 0x035, 0x00048188, + 0x035, 0x00050188, + 0xA0000000, 0x00000000, + 0x035, 0x00000145, + 0x035, 0x00008145, + 0x035, 0x00010145, + 0x035, 0x00020196, + 0x035, 0x00028196, + 0x035, 0x00030196, + 0x035, 0x000401C7, + 0x035, 0x000481C7, + 0x035, 0x000501C7, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000063B5, + 0x036, 0x0000E3B5, + 0x036, 0x000163B5, + 0x036, 0x0001E3B5, + 0x036, 0x000263B5, + 0x036, 0x0002E3B5, + 0x036, 0x000363B5, + 0x036, 0x0003E3B5, + 0x036, 0x000463B5, + 0x036, 0x0004E3B5, + 0x036, 0x000563B5, + 0x036, 0x0005E3B5, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000056B3, + 0x036, 0x0000D6B3, + 0x036, 0x000156B3, + 0x036, 0x0001D6B3, + 0x036, 0x00026634, + 0x036, 0x0002E634, + 0x036, 0x00036634, + 0x036, 0x0003E634, + 0x036, 0x000467B4, + 0x036, 0x0004E7B4, + 0x036, 0x000567B4, + 0x036, 0x0005E7B4, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x000063B5, + 0x036, 0x0000E3B5, + 0x036, 0x000163B5, + 0x036, 0x0001E3B5, + 0x036, 0x000263B5, + 0x036, 0x0002E3B5, + 0x036, 0x000363B5, + 0x036, 0x0003E3B5, + 0x036, 0x000463B5, + 0x036, 0x0004E3B5, + 0x036, 0x000563B5, + 0x036, 0x0005E3B5, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00085733, + 0x036, 0x0008D733, + 0x036, 0x00095733, + 0x036, 0x0009D733, + 0x036, 0x000A64B4, + 0x036, 0x000AE4B4, + 0x036, 0x000B64B4, + 0x036, 0x000BE4B4, + 0x036, 0x000C64B4, + 0x036, 0x000CE4B4, + 0x036, 0x000D64B4, + 0x036, 0x000DE4B4, + 0xA0000000, 0x00000000, + 0x036, 0x000056B3, + 0x036, 0x0000D6B3, + 0x036, 0x000156B3, + 0x036, 0x0001D6B3, + 0x036, 0x00026634, + 0x036, 0x0002E634, + 0x036, 0x00036634, + 0x036, 0x0003E634, + 0x036, 0x000467B4, + 0x036, 0x0004E7B4, + 0x036, 0x000567B4, + 0x036, 0x0005E7B4, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x00000008, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001B6, + 0x03C, 0x00000492, + 
0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000022A, + 0x03C, 0x00000594, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001B6, + 0x03C, 0x00000492, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000001C8, + 0x03C, 0x00000492, + 0xA0000000, 0x00000000, + 0x03C, 0x0000022A, + 0x03C, 0x00000594, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000820, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000820, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000800, + 0xA0000000, 0x00000000, + 0x03C, 0x00000900, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000002, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x0004E400, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x0004E400, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x00002000, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x008, 0x0004E400, + 0xA0000000, 0x00000000, + 0x008, 0x00002000, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0DF, 0x000000C0, + 0x01F, 0x00000064, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x058, 0x000A7284, + 0x059, 0x000600EC, + 0xA0000000, 0x00000000, + 0x058, 0x00081184, + 0x059, 0x0006016C, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EFD83, + 0x062, 0x00093FCC, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EAD53, + 0x062, 0x00093BC4, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000EFD83, + 0x062, 0x00093FCC, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x000E8D73, + 0x062, 0x00093FC5, + 0xA0000000, 0x00000000, + 0x061, 0x000EAD53, + 0x062, 0x00093BC4, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110E9, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110E9, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110EB, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110E9, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110E9, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110EB, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000110E9, + 0xA0000000, 0x00000000, + 0x063, 0x000714E9, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x064, 
0x0001C27C, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C67C, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0x90000410, 0x00000000, 0x40000000, 0x00000000, + 0x064, 0x0001C27C, + 0xA0000000, 0x00000000, + 0x064, 0x0001C67C, + 0xB0000000, 0x00000000, + 0x80000111, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00091016, + 0x90000110, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00091016, + 0x90000210, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093016, + 0x9000020c, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093015, + 0x9000040c, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093015, + 0x90000200, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x00093016, + 0xA0000000, 0x00000000, + 0x065, 0x00091016, + 0xB0000000, 0x00000000, + 0x018, 0x00000006, + 0x0EF, 0x00002000, + 0x03B, 0x0003824B, + 0x03B, 0x0003024B, + 0x03B, 0x0002844B, + 0x03B, 0x00020F4B, + 0x03B, 0x00018F4B, + 0x03B, 0x000104B2, + 0x03B, 0x00008049, + 0x03B, 0x00000148, + 0x03B, 0x0007824B, + 0x03B, 0x0007024B, + 0x03B, 0x0006824B, + 0x03B, 0x00060F4B, + 0x03B, 0x00058F4B, + 0x03B, 0x000504B2, + 0x03B, 0x00048049, + 0x03B, 0x00040148, + 0x0EF, 0x00000000, + 0x0EF, 0x00000100, + 0x034, 0x0000ADF3, + 0x034, 0x00009DF0, + 0x034, 0x00008D70, + 0x034, 0x00007D6D, + 0x034, 0x00006CEE, + 0x034, 0x00005CCC, + 0x034, 0x000044EC, + 0x034, 0x000034AC, + 0x034, 0x0000246D, + 0x034, 0x0000106F, + 0x034, 0x0000006C, + 0x0EF, 0x00000000, + 0x0ED, 0x00000010, + 0x044, 0x0000ADF2, + 0x044, 0x00009DEF, + 0x044, 0x00008DEC, + 0x044, 0x00007DE9, + 0x044, 0x00006CEC, + 0x044, 0x00005CE9, + 0x044, 0x000044EC, + 0x044, 0x000034E9, + 0x044, 0x0000246C, + 0x044, 0x00001469, + 0x044, 0x0000006C, + 0x0ED, 0x00000000, + 0x0ED, 0x00000001, + 0x040, 0x00038DA7, + 0x040, 0x000300C2, + 0x040, 0x000288E2, + 0x040, 0x000200B8, + 0x040, 0x000188A5, + 0x040, 0x00010FBC, + 0x040, 0x00008F71, + 0x040, 0x00000240, + 0x0ED, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000120, + 0x035, 0x00008120, + 0x035, 0x00010120, + 0x036, 0x00000085, + 0x036, 0x00008085, + 0x036, 0x00010085, + 0x036, 0x00018085, + 0x0EF, 0x00000000, + 0x051, 0x00000C31, + 0x052, 0x00000622, + 0x053, 0x000FC70B, + 0x054, 0x0000017E, + 0x056, 0x00051DF3, + 0x051, 0x00000C01, + 0x052, 0x000006D6, + 0x053, 0x000FC649, + 0x070, 0x00049661, + 0x071, 0x0007843E, + 0x072, 0x00000382, + 0x074, 0x00051400, + 0x035, 0x00000160, + 0x035, 0x00008160, + 0x035, 0x00010160, + 0x036, 0x00000124, + 0x036, 0x00008124, + 0x036, 0x00010124, + 0x036, 0x00018124, + 0x0ED, 0x0000000C, + 0x045, 0x00000140, + 0x045, 0x00008140, + 0x045, 0x00010140, + 0x046, 0x00000124, + 0x046, 0x00008124, + 0x046, 0x00010124, + 0x046, 0x00018124, + 0x0DF, 0x00000088, + 0x0B3, 0x000F0E18, + 0x0B4, 0x0001214C, + 0x0B7, 0x0003000C, + 0x01C, 0x000539D2, + 0x0C4, 0x000AFE00, + 0x018, 0x0001F12A, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0x018, 0x0001712A, +}; + +RTW_DECL_TABLE_RF_RADIO(rtw8821a_rf_a, A); + +static const struct rtw_txpwr_lmt_cfg_pair rtw8821a_txpwr_lmt[] = { + { 0, 0, 0, 0, 1, 32, }, + { 2, 0, 0, 0, 1, 28, }, + { 1, 0, 0, 0, 1, 32, }, + { 0, 0, 0, 0, 2, 32, }, + { 2, 0, 0, 0, 2, 28, }, + { 1, 0, 0, 0, 2, 32, }, + { 0, 0, 0, 0, 3, 36, }, + { 2, 0, 0, 0, 3, 28, }, + { 1, 0, 0, 0, 3, 32, }, + { 0, 0, 0, 0, 4, 36, }, + { 2, 0, 0, 0, 4, 28, }, + { 1, 0, 0, 0, 4, 32, }, + { 0, 0, 0, 0, 5, 36, }, + { 2, 0, 0, 
0, 5, 28, }, + { 1, 0, 0, 0, 5, 32, }, + { 0, 0, 0, 0, 6, 36, }, + { 2, 0, 0, 0, 6, 28, }, + { 1, 0, 0, 0, 6, 32, }, + { 0, 0, 0, 0, 7, 36, }, + { 2, 0, 0, 0, 7, 28, }, + { 1, 0, 0, 0, 7, 32, }, + { 0, 0, 0, 0, 8, 36, }, + { 2, 0, 0, 0, 8, 28, }, + { 1, 0, 0, 0, 8, 32, }, + { 0, 0, 0, 0, 9, 32, }, + { 2, 0, 0, 0, 9, 28, }, + { 1, 0, 0, 0, 9, 32, }, + { 0, 0, 0, 0, 10, 32, }, + { 2, 0, 0, 0, 10, 28, }, + { 1, 0, 0, 0, 10, 32, }, + { 0, 0, 0, 0, 11, 32, }, + { 2, 0, 0, 0, 11, 28, }, + { 1, 0, 0, 0, 11, 32, }, + { 0, 0, 0, 0, 12, 28, }, + { 2, 0, 0, 0, 12, 28, }, + { 1, 0, 0, 0, 12, 32, }, + { 0, 0, 0, 0, 13, 26, }, + { 2, 0, 0, 0, 13, 28, }, + { 1, 0, 0, 0, 13, 32, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 32, }, + { 0, 0, 0, 1, 1, 30, }, + { 2, 0, 0, 1, 1, 30, }, + { 1, 0, 0, 1, 1, 32, }, + { 0, 0, 0, 1, 2, 30, }, + { 2, 0, 0, 1, 2, 32, }, + { 1, 0, 0, 1, 2, 32, }, + { 0, 0, 0, 1, 3, 32, }, + { 2, 0, 0, 1, 3, 32, }, + { 1, 0, 0, 1, 3, 32, }, + { 0, 0, 0, 1, 4, 32, }, + { 2, 0, 0, 1, 4, 32, }, + { 1, 0, 0, 1, 4, 32, }, + { 0, 0, 0, 1, 5, 32, }, + { 2, 0, 0, 1, 5, 32, }, + { 1, 0, 0, 1, 5, 32, }, + { 0, 0, 0, 1, 6, 32, }, + { 2, 0, 0, 1, 6, 32, }, + { 1, 0, 0, 1, 6, 32, }, + { 0, 0, 0, 1, 7, 32, }, + { 2, 0, 0, 1, 7, 32, }, + { 1, 0, 0, 1, 7, 32, }, + { 0, 0, 0, 1, 8, 32, }, + { 2, 0, 0, 1, 8, 32, }, + { 1, 0, 0, 1, 8, 32, }, + { 0, 0, 0, 1, 9, 30, }, + { 2, 0, 0, 1, 9, 32, }, + { 1, 0, 0, 1, 9, 32, }, + { 0, 0, 0, 1, 10, 30, }, + { 2, 0, 0, 1, 10, 32, }, + { 1, 0, 0, 1, 10, 32, }, + { 0, 0, 0, 1, 11, 30, }, + { 2, 0, 0, 1, 11, 32, }, + { 1, 0, 0, 1, 11, 32, }, + { 0, 0, 0, 1, 12, 26, }, + { 2, 0, 0, 1, 12, 32, }, + { 1, 0, 0, 1, 12, 32, }, + { 0, 0, 0, 1, 13, 24, }, + { 2, 0, 0, 1, 13, 30, }, + { 1, 0, 0, 1, 13, 32, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 26, }, + { 2, 0, 0, 2, 1, 26, }, + { 1, 0, 0, 2, 1, 32, }, + { 0, 0, 0, 2, 2, 26, }, + { 2, 0, 0, 2, 2, 32, }, + { 1, 0, 0, 2, 2, 32, }, + { 0, 0, 0, 2, 3, 32, }, + { 2, 0, 0, 2, 3, 32, }, + { 1, 0, 0, 2, 3, 32, }, + { 0, 0, 0, 2, 4, 32, }, + { 2, 0, 0, 2, 4, 32, }, + { 1, 0, 0, 2, 4, 32, }, + { 0, 0, 0, 2, 5, 32, }, + { 2, 0, 0, 2, 5, 32, }, + { 1, 0, 0, 2, 5, 32, }, + { 0, 0, 0, 2, 6, 32, }, + { 2, 0, 0, 2, 6, 32, }, + { 1, 0, 0, 2, 6, 32, }, + { 0, 0, 0, 2, 7, 32, }, + { 2, 0, 0, 2, 7, 32, }, + { 1, 0, 0, 2, 7, 32, }, + { 0, 0, 0, 2, 8, 32, }, + { 2, 0, 0, 2, 8, 32, }, + { 1, 0, 0, 2, 8, 32, }, + { 0, 0, 0, 2, 9, 26, }, + { 2, 0, 0, 2, 9, 32, }, + { 1, 0, 0, 2, 9, 32, }, + { 0, 0, 0, 2, 10, 26, }, + { 2, 0, 0, 2, 10, 32, }, + { 1, 0, 0, 2, 10, 32, }, + { 0, 0, 0, 2, 11, 26, }, + { 2, 0, 0, 2, 11, 32, }, + { 1, 0, 0, 2, 11, 32, }, + { 0, 0, 0, 2, 12, 26, }, + { 2, 0, 0, 2, 12, 32, }, + { 1, 0, 0, 2, 12, 32, }, + { 0, 0, 0, 2, 13, 24, }, + { 2, 0, 0, 2, 13, 26, }, + { 1, 0, 0, 2, 13, 32, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 30, }, + { 2, 0, 0, 3, 1, 32, }, + { 1, 0, 0, 3, 1, 32, }, + { 0, 0, 0, 3, 2, 32, }, + { 2, 0, 0, 3, 2, 32, }, + { 1, 0, 0, 3, 2, 32, }, + { 0, 0, 0, 3, 3, 32, }, + { 2, 0, 0, 3, 3, 32, }, + { 1, 0, 0, 3, 3, 32, }, + { 0, 0, 0, 3, 4, 32, }, + { 2, 0, 0, 3, 4, 32, }, + { 1, 0, 0, 3, 4, 32, }, + { 0, 0, 0, 3, 5, 32, }, + { 2, 0, 0, 3, 5, 32, }, + { 1, 0, 0, 3, 5, 32, }, + { 0, 0, 0, 3, 6, 32, }, + { 2, 0, 0, 3, 6, 32, }, + { 1, 0, 0, 3, 6, 32, }, + { 0, 0, 0, 3, 7, 32, }, + { 2, 0, 0, 3, 7, 32, }, + { 1, 0, 0, 3, 7, 32, }, + { 0, 0, 0, 3, 8, 32, }, + { 2, 0, 0, 
3, 8, 32, }, + { 1, 0, 0, 3, 8, 32, }, + { 0, 0, 0, 3, 9, 32, }, + { 2, 0, 0, 3, 9, 32, }, + { 1, 0, 0, 3, 9, 32, }, + { 0, 0, 0, 3, 10, 32, }, + { 2, 0, 0, 3, 10, 32, }, + { 1, 0, 0, 3, 10, 32, }, + { 0, 0, 0, 3, 11, 30, }, + { 2, 0, 0, 3, 11, 32, }, + { 1, 0, 0, 3, 11, 32, }, + { 0, 0, 0, 3, 12, 63, }, + { 2, 0, 0, 3, 12, 32, }, + { 1, 0, 0, 3, 12, 32, }, + { 0, 0, 0, 3, 13, 63, }, + { 2, 0, 0, 3, 13, 32, }, + { 1, 0, 0, 3, 13, 32, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 26, }, + { 2, 0, 1, 2, 3, 26, }, + { 1, 0, 1, 2, 3, 32, }, + { 0, 0, 1, 2, 4, 26, }, + { 2, 0, 1, 2, 4, 32, }, + { 1, 0, 1, 2, 4, 32, }, + { 0, 0, 1, 2, 5, 26, }, + { 2, 0, 1, 2, 5, 32, }, + { 1, 0, 1, 2, 5, 32, }, + { 0, 0, 1, 2, 6, 32, }, + { 2, 0, 1, 2, 6, 32, }, + { 1, 0, 1, 2, 6, 32, }, + { 0, 0, 1, 2, 7, 32, }, + { 2, 0, 1, 2, 7, 32, }, + { 1, 0, 1, 2, 7, 32, }, + { 0, 0, 1, 2, 8, 32, }, + { 2, 0, 1, 2, 8, 32, }, + { 1, 0, 1, 2, 8, 32, }, + { 0, 0, 1, 2, 9, 26, }, + { 2, 0, 1, 2, 9, 32, }, + { 1, 0, 1, 2, 9, 32, }, + { 0, 0, 1, 2, 10, 24, }, + { 2, 0, 1, 2, 10, 32, }, + { 1, 0, 1, 2, 10, 32, }, + { 0, 0, 1, 2, 11, 22, }, + { 2, 0, 1, 2, 11, 26, }, + { 1, 0, 1, 2, 11, 32, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 63, }, + { 1, 0, 1, 2, 12, 63, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 63, }, + { 1, 0, 1, 2, 13, 63, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 30, }, + { 2, 0, 1, 3, 3, 30, }, + { 1, 0, 1, 3, 3, 30, }, + { 0, 0, 1, 3, 4, 32, }, + { 2, 0, 1, 3, 4, 30, }, + { 1, 0, 1, 3, 4, 30, }, + { 0, 0, 1, 3, 5, 32, }, + { 2, 0, 1, 3, 5, 30, }, + { 1, 0, 1, 3, 5, 30, }, + { 0, 0, 1, 3, 6, 32, }, + { 2, 0, 1, 3, 6, 30, }, + { 1, 0, 1, 3, 6, 30, }, + { 0, 0, 1, 3, 7, 32, }, + { 2, 0, 1, 3, 7, 30, }, + { 1, 0, 1, 3, 7, 30, }, + { 0, 0, 1, 3, 8, 32, }, + { 2, 0, 1, 3, 8, 30, }, + { 1, 0, 1, 3, 8, 30, }, + { 0, 0, 1, 3, 9, 32, }, + { 2, 0, 1, 3, 9, 30, }, + { 1, 0, 1, 3, 9, 30, }, + { 0, 0, 1, 3, 10, 32, }, + { 2, 0, 1, 3, 10, 30, }, + { 1, 0, 1, 3, 10, 30, }, + { 0, 0, 1, 3, 11, 30, }, + { 2, 0, 1, 3, 11, 30, }, + { 1, 0, 1, 3, 11, 30, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 32, }, + { 1, 0, 1, 3, 12, 32, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 32, }, + { 1, 0, 1, 3, 13, 32, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 1, 0, 1, 36, 32, }, + { 2, 1, 0, 1, 36, 30, }, + { 1, 1, 0, 1, 36, 30, }, + { 0, 1, 0, 1, 40, 32, }, + { 2, 1, 0, 1, 40, 30, }, + { 1, 1, 0, 1, 40, 30, }, + { 0, 1, 0, 1, 44, 32, }, + { 2, 1, 0, 1, 44, 30, }, + { 1, 1, 0, 1, 44, 30, }, + { 0, 1, 0, 1, 48, 32, }, + { 2, 1, 0, 1, 48, 30, }, + { 1, 1, 0, 1, 48, 30, }, + { 0, 1, 0, 1, 52, 32, }, + { 2, 1, 0, 1, 52, 30, }, + { 1, 1, 0, 1, 52, 30, }, + { 0, 1, 0, 1, 56, 32, }, + { 2, 1, 0, 1, 56, 30, }, + { 1, 1, 0, 1, 56, 30, }, + { 0, 1, 0, 1, 60, 32, }, + { 2, 1, 0, 1, 60, 30, }, + { 1, 1, 0, 1, 60, 30, }, + { 0, 1, 0, 1, 64, 32, }, + { 2, 1, 0, 1, 64, 30, }, + { 1, 1, 0, 1, 64, 30, }, + { 0, 1, 0, 1, 100, 32, }, + { 2, 1, 0, 1, 100, 30, }, + { 1, 1, 0, 1, 100, 30, }, + { 0, 1, 0, 1, 104, 32, }, + { 2, 1, 0, 1, 104, 30, }, + { 1, 1, 0, 1, 104, 30, }, + 
{ 0, 1, 0, 1, 108, 32, }, + { 2, 1, 0, 1, 108, 30, }, + { 1, 1, 0, 1, 108, 30, }, + { 0, 1, 0, 1, 112, 32, }, + { 2, 1, 0, 1, 112, 30, }, + { 1, 1, 0, 1, 112, 30, }, + { 0, 1, 0, 1, 116, 32, }, + { 2, 1, 0, 1, 116, 30, }, + { 1, 1, 0, 1, 116, 30, }, + { 0, 1, 0, 1, 120, 32, }, + { 2, 1, 0, 1, 120, 30, }, + { 1, 1, 0, 1, 120, 30, }, + { 0, 1, 0, 1, 124, 32, }, + { 2, 1, 0, 1, 124, 30, }, + { 1, 1, 0, 1, 124, 30, }, + { 0, 1, 0, 1, 128, 32, }, + { 2, 1, 0, 1, 128, 30, }, + { 1, 1, 0, 1, 128, 30, }, + { 0, 1, 0, 1, 132, 32, }, + { 2, 1, 0, 1, 132, 30, }, + { 1, 1, 0, 1, 132, 30, }, + { 0, 1, 0, 1, 136, 32, }, + { 2, 1, 0, 1, 136, 30, }, + { 1, 1, 0, 1, 136, 30, }, + { 0, 1, 0, 1, 140, 32, }, + { 2, 1, 0, 1, 140, 30, }, + { 1, 1, 0, 1, 140, 30, }, + { 0, 1, 0, 1, 149, 32, }, + { 2, 1, 0, 1, 149, 30, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 32, }, + { 2, 1, 0, 1, 153, 30, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 32, }, + { 2, 1, 0, 1, 157, 30, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 32, }, + { 2, 1, 0, 1, 161, 30, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 32, }, + { 2, 1, 0, 1, 165, 30, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 32, }, + { 2, 1, 0, 2, 36, 30, }, + { 1, 1, 0, 2, 36, 30, }, + { 0, 1, 0, 2, 40, 32, }, + { 2, 1, 0, 2, 40, 30, }, + { 1, 1, 0, 2, 40, 30, }, + { 0, 1, 0, 2, 44, 32, }, + { 2, 1, 0, 2, 44, 30, }, + { 1, 1, 0, 2, 44, 30, }, + { 0, 1, 0, 2, 48, 32, }, + { 2, 1, 0, 2, 48, 30, }, + { 1, 1, 0, 2, 48, 30, }, + { 0, 1, 0, 2, 52, 32, }, + { 2, 1, 0, 2, 52, 30, }, + { 1, 1, 0, 2, 52, 30, }, + { 0, 1, 0, 2, 56, 32, }, + { 2, 1, 0, 2, 56, 30, }, + { 1, 1, 0, 2, 56, 30, }, + { 0, 1, 0, 2, 60, 32, }, + { 2, 1, 0, 2, 60, 30, }, + { 1, 1, 0, 2, 60, 30, }, + { 0, 1, 0, 2, 64, 32, }, + { 2, 1, 0, 2, 64, 30, }, + { 1, 1, 0, 2, 64, 30, }, + { 0, 1, 0, 2, 100, 32, }, + { 2, 1, 0, 2, 100, 30, }, + { 1, 1, 0, 2, 100, 30, }, + { 0, 1, 0, 2, 104, 32, }, + { 2, 1, 0, 2, 104, 30, }, + { 1, 1, 0, 2, 104, 30, }, + { 0, 1, 0, 2, 108, 32, }, + { 2, 1, 0, 2, 108, 30, }, + { 1, 1, 0, 2, 108, 30, }, + { 0, 1, 0, 2, 112, 32, }, + { 2, 1, 0, 2, 112, 30, }, + { 1, 1, 0, 2, 112, 30, }, + { 0, 1, 0, 2, 116, 32, }, + { 2, 1, 0, 2, 116, 30, }, + { 1, 1, 0, 2, 116, 30, }, + { 0, 1, 0, 2, 120, 32, }, + { 2, 1, 0, 2, 120, 30, }, + { 1, 1, 0, 2, 120, 30, }, + { 0, 1, 0, 2, 124, 32, }, + { 2, 1, 0, 2, 124, 30, }, + { 1, 1, 0, 2, 124, 30, }, + { 0, 1, 0, 2, 128, 32, }, + { 2, 1, 0, 2, 128, 30, }, + { 1, 1, 0, 2, 128, 30, }, + { 0, 1, 0, 2, 132, 32, }, + { 2, 1, 0, 2, 132, 30, }, + { 1, 1, 0, 2, 132, 30, }, + { 0, 1, 0, 2, 136, 32, }, + { 2, 1, 0, 2, 136, 30, }, + { 1, 1, 0, 2, 136, 30, }, + { 0, 1, 0, 2, 140, 32, }, + { 2, 1, 0, 2, 140, 30, }, + { 1, 1, 0, 2, 140, 30, }, + { 0, 1, 0, 2, 149, 32, }, + { 2, 1, 0, 2, 149, 30, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 32, }, + { 2, 1, 0, 2, 153, 30, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 32, }, + { 2, 1, 0, 2, 157, 30, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 32, }, + { 2, 1, 0, 2, 161, 30, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 32, }, + { 2, 1, 0, 2, 165, 30, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 28, }, + { 2, 1, 0, 3, 36, 30, }, + { 1, 1, 0, 3, 36, 30, }, + { 0, 1, 0, 3, 40, 28, }, + { 2, 1, 0, 3, 40, 30, }, + { 1, 1, 0, 3, 40, 30, }, + { 0, 1, 0, 3, 44, 28, }, + { 2, 1, 0, 3, 44, 30, }, + { 1, 1, 0, 3, 44, 30, }, + { 0, 1, 0, 3, 48, 28, }, + { 2, 1, 0, 3, 48, 30, }, + { 1, 1, 0, 3, 48, 30, }, + { 0, 1, 0, 3, 52, 34, }, + { 2, 1, 0, 3, 52, 30, }, + { 1, 1, 
0, 3, 52, 30, }, + { 0, 1, 0, 3, 56, 32, }, + { 2, 1, 0, 3, 56, 30, }, + { 1, 1, 0, 3, 56, 30, }, + { 0, 1, 0, 3, 60, 30, }, + { 2, 1, 0, 3, 60, 30, }, + { 1, 1, 0, 3, 60, 30, }, + { 0, 1, 0, 3, 64, 26, }, + { 2, 1, 0, 3, 64, 30, }, + { 1, 1, 0, 3, 64, 30, }, + { 0, 1, 0, 3, 100, 28, }, + { 2, 1, 0, 3, 100, 30, }, + { 1, 1, 0, 3, 100, 30, }, + { 0, 1, 0, 3, 104, 28, }, + { 2, 1, 0, 3, 104, 30, }, + { 1, 1, 0, 3, 104, 30, }, + { 0, 1, 0, 3, 108, 30, }, + { 2, 1, 0, 3, 108, 30, }, + { 1, 1, 0, 3, 108, 30, }, + { 0, 1, 0, 3, 112, 32, }, + { 2, 1, 0, 3, 112, 30, }, + { 1, 1, 0, 3, 112, 30, }, + { 0, 1, 0, 3, 116, 32, }, + { 2, 1, 0, 3, 116, 30, }, + { 1, 1, 0, 3, 116, 30, }, + { 0, 1, 0, 3, 120, 34, }, + { 2, 1, 0, 3, 120, 30, }, + { 1, 1, 0, 3, 120, 30, }, + { 0, 1, 0, 3, 124, 32, }, + { 2, 1, 0, 3, 124, 30, }, + { 1, 1, 0, 3, 124, 30, }, + { 0, 1, 0, 3, 128, 30, }, + { 2, 1, 0, 3, 128, 30, }, + { 1, 1, 0, 3, 128, 30, }, + { 0, 1, 0, 3, 132, 28, }, + { 2, 1, 0, 3, 132, 30, }, + { 1, 1, 0, 3, 132, 30, }, + { 0, 1, 0, 3, 136, 28, }, + { 2, 1, 0, 3, 136, 30, }, + { 1, 1, 0, 3, 136, 30, }, + { 0, 1, 0, 3, 140, 26, }, + { 2, 1, 0, 3, 140, 30, }, + { 1, 1, 0, 3, 140, 30, }, + { 0, 1, 0, 3, 149, 34, }, + { 2, 1, 0, 3, 149, 30, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 34, }, + { 2, 1, 0, 3, 153, 30, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 34, }, + { 2, 1, 0, 3, 157, 30, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 34, }, + { 2, 1, 0, 3, 161, 30, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 34, }, + { 2, 1, 0, 3, 165, 30, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 1, 2, 38, 26, }, + { 2, 1, 1, 2, 38, 30, }, + { 1, 1, 1, 2, 38, 30, }, + { 0, 1, 1, 2, 46, 32, }, + { 2, 1, 1, 2, 46, 30, }, + { 1, 1, 1, 2, 46, 30, }, + { 0, 1, 1, 2, 54, 32, }, + { 2, 1, 1, 2, 54, 30, }, + { 1, 1, 1, 2, 54, 30, }, + { 0, 1, 1, 2, 62, 24, }, + { 2, 1, 1, 2, 62, 30, }, + { 1, 1, 1, 2, 62, 30, }, + { 0, 1, 1, 2, 102, 24, }, + { 2, 1, 1, 2, 102, 30, }, + { 1, 1, 1, 2, 102, 30, }, + { 0, 1, 1, 2, 110, 32, }, + { 2, 1, 1, 2, 110, 30, }, + { 1, 1, 1, 2, 110, 30, }, + { 0, 1, 1, 2, 118, 32, }, + { 2, 1, 1, 2, 118, 30, }, + { 1, 1, 1, 2, 118, 30, }, + { 0, 1, 1, 2, 126, 32, }, + { 2, 1, 1, 2, 126, 30, }, + { 1, 1, 1, 2, 126, 30, }, + { 0, 1, 1, 2, 134, 32, }, + { 2, 1, 1, 2, 134, 30, }, + { 1, 1, 1, 2, 134, 30, }, + { 0, 1, 1, 2, 151, 30, }, + { 2, 1, 1, 2, 151, 30, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 32, }, + { 2, 1, 1, 2, 159, 30, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 28, }, + { 2, 1, 1, 3, 38, 30, }, + { 1, 1, 1, 3, 38, 30, }, + { 0, 1, 1, 3, 46, 28, }, + { 2, 1, 1, 3, 46, 30, }, + { 1, 1, 1, 3, 46, 30, }, + { 0, 1, 1, 3, 54, 30, }, + { 2, 1, 1, 3, 54, 30, }, + { 1, 1, 1, 3, 54, 30, }, + { 0, 1, 1, 3, 62, 30, }, + { 2, 1, 1, 3, 62, 30, }, + { 1, 1, 1, 3, 62, 30, }, + { 0, 1, 1, 3, 102, 26, }, + { 2, 1, 1, 3, 102, 30, }, + { 1, 1, 1, 3, 102, 30, }, + { 0, 1, 1, 3, 110, 30, }, + { 2, 1, 1, 3, 110, 30, }, + { 1, 1, 1, 3, 110, 30, }, + { 0, 1, 1, 3, 118, 34, }, + { 2, 1, 1, 3, 118, 30, }, + { 1, 1, 1, 3, 118, 30, }, + { 0, 1, 1, 3, 126, 32, }, + { 2, 1, 1, 3, 126, 30, }, + { 1, 1, 1, 3, 126, 30, }, + { 0, 1, 1, 3, 134, 30, }, + { 2, 1, 1, 3, 134, 30, }, + { 1, 1, 1, 3, 134, 30, }, + { 0, 1, 1, 3, 151, 34, }, + { 2, 1, 1, 3, 151, 30, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 34, }, + { 2, 1, 1, 3, 159, 30, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 2, 4, 42, 22, }, + { 2, 1, 2, 4, 42, 30, }, + { 1, 1, 2, 4, 42, 30, }, + { 0, 1, 2, 4, 58, 20, }, + { 2, 1, 2, 4, 
58, 30, }, + { 1, 1, 2, 4, 58, 30, }, + { 0, 1, 2, 4, 106, 20, }, + { 2, 1, 2, 4, 106, 30, }, + { 1, 1, 2, 4, 106, 30, }, + { 0, 1, 2, 4, 122, 20, }, + { 2, 1, 2, 4, 122, 30, }, + { 1, 1, 2, 4, 122, 30, }, + { 0, 1, 2, 4, 155, 28, }, + { 2, 1, 2, 4, 155, 30, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 28, }, + { 2, 1, 2, 5, 42, 30, }, + { 1, 1, 2, 5, 42, 30, }, + { 0, 1, 2, 5, 58, 26, }, + { 2, 1, 2, 5, 58, 30, }, + { 1, 1, 2, 5, 58, 30, }, + { 0, 1, 2, 5, 106, 28, }, + { 2, 1, 2, 5, 106, 30, }, + { 1, 1, 2, 5, 106, 30, }, + { 0, 1, 2, 5, 122, 32, }, + { 2, 1, 2, 5, 122, 30, }, + { 1, 1, 2, 5, 122, 30, }, + { 0, 1, 2, 5, 155, 34, }, + { 2, 1, 2, 5, 155, 30, }, + { 1, 1, 2, 5, 155, 63, }, +}; + +RTW_DECL_TABLE_TXPWR_LMT(rtw8821a_txpwr_lmt); + +static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8821a[] = { + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3) | BIT(7), 0}, + {0x0086, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0086, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_POLLING, BIT(1), BIT(1)}, + {0x004A, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), 0}, + {0x0023, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), 0}, + {0x0301, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8821a[] = { + {0x0020, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0067, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), 0}, + {0x0001, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS}, + {0x0000, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4) | BIT(3) | BIT(2), 0}, + {0x0075, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), BIT(1)}, + {0x0075, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4) | BIT(3), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(0), 0}, + {0x004F, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0067, + 
RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5) | BIT(4), BIT(5) | BIT(4)}, + {0x0025, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), 0}, + {0x0049, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0063, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0062, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0058, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x005A, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x002E, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x82}, + {0x0010, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), BIT(6)}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_act_to_lps_8821a[] = { + {0x0301, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0xFF}, + {0x0522, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0xFF}, + {0x05F8, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xFF, 0}, + {0x05F9, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xFF, 0}, + {0x05FA, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xFF, 0}, + {0x05FB, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, 0xFF, 0}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_DELAY, 0, RTW_PWR_DELAY_US}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0100, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x03}, + {0x0101, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0093, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x00}, + {0x0553, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8821a[] = { + {0x001F, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0x004F, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0049, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), 0}, + {0x0000, + RTW_PWR_CUT_ALL_MSK, + 
RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + {0x0020, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8821a[] = { + {0x0007, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x20}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), BIT(2)}, + {0x004A, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 1}, + {0x0023, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), BIT(4)}, + {0x0086, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0086, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_POLLING, BIT(1), 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +const struct rtw_pwr_seq_cmd * const card_enable_flow_8821a[] = { + trans_carddis_to_cardemu_8821a, + trans_cardemu_to_act_8821a, + NULL +}; + +const struct rtw_pwr_seq_cmd * const enter_lps_flow_8821a[] = { + trans_act_to_lps_8821a, + NULL +}; + +const struct rtw_pwr_seq_cmd * const card_disable_flow_8821a[] = { + trans_act_to_cardemu_8821a, + trans_cardemu_to_carddis_8821a, + NULL +}; + +static const u8 rtw8821a_pwrtrk_5gb_n[][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +static const u8 rtw8821a_pwrtrk_5gb_p[][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +static const u8 rtw8821a_pwrtrk_5ga_n[][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +static const u8 rtw8821a_pwrtrk_5ga_p[][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, + 15, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +static const u8 rtw8821a_pwrtrk_2gb_n[] = { + 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10 +}; + +static const u8 rtw8821a_pwrtrk_2gb_p[] = { 
+	0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+	8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8821a_pwrtrk_2ga_n[] = {
+	0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6,
+	6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10
+};
+
+static const u8 rtw8821a_pwrtrk_2ga_p[] = {
+	0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+	8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8821a_pwrtrk_2g_cck_b_n[] = {
+	0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6,
+	6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10
+};
+
+static const u8 rtw8821a_pwrtrk_2g_cck_b_p[] = {
+	0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+	8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12
+};
+
+static const u8 rtw8821a_pwrtrk_2g_cck_a_n[] = {
+	0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6,
+	6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10
+};
+
+static const u8 rtw8821a_pwrtrk_2g_cck_a_p[] = {
+	0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+	8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12
+};
+
+const struct rtw_pwr_track_tbl rtw8821a_rtw_pwr_track_tbl = {
+	.pwrtrk_5gb_n[0] = rtw8821a_pwrtrk_5gb_n[0],
+	.pwrtrk_5gb_n[1] = rtw8821a_pwrtrk_5gb_n[1],
+	.pwrtrk_5gb_n[2] = rtw8821a_pwrtrk_5gb_n[2],
+	.pwrtrk_5gb_p[0] = rtw8821a_pwrtrk_5gb_p[0],
+	.pwrtrk_5gb_p[1] = rtw8821a_pwrtrk_5gb_p[1],
+	.pwrtrk_5gb_p[2] = rtw8821a_pwrtrk_5gb_p[2],
+	.pwrtrk_5ga_n[0] = rtw8821a_pwrtrk_5ga_n[0],
+	.pwrtrk_5ga_n[1] = rtw8821a_pwrtrk_5ga_n[1],
+	.pwrtrk_5ga_n[2] = rtw8821a_pwrtrk_5ga_n[2],
+	.pwrtrk_5ga_p[0] = rtw8821a_pwrtrk_5ga_p[0],
+	.pwrtrk_5ga_p[1] = rtw8821a_pwrtrk_5ga_p[1],
+	.pwrtrk_5ga_p[2] = rtw8821a_pwrtrk_5ga_p[2],
+	.pwrtrk_2gb_n = rtw8821a_pwrtrk_2gb_n,
+	.pwrtrk_2gb_p = rtw8821a_pwrtrk_2gb_p,
+	.pwrtrk_2ga_n = rtw8821a_pwrtrk_2ga_n,
+	.pwrtrk_2ga_p = rtw8821a_pwrtrk_2ga_p,
+	.pwrtrk_2g_cckb_n = rtw8821a_pwrtrk_2g_cck_b_n,
+	.pwrtrk_2g_cckb_p = rtw8821a_pwrtrk_2g_cck_b_p,
+	.pwrtrk_2g_ccka_n = rtw8821a_pwrtrk_2g_cck_a_n,
+	.pwrtrk_2g_ccka_p = rtw8821a_pwrtrk_2g_cck_a_p,
+};
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821a_table.h b/drivers/net/wireless/realtek/rtw88/rtw8821a_table.h
new file mode 100644
index 000000000000..90379ac7a817
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821a_table.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#ifndef __RTW8821A_TABLE_H__
+#define __RTW8821A_TABLE_H__
+
+extern const struct rtw_table rtw8821a_mac_tbl;
+extern const struct rtw_table rtw8821a_agc_tbl;
+extern const struct rtw_table rtw8821a_bb_tbl;
+extern const struct rtw_table rtw8821a_bb_pg_tbl;
+extern const struct rtw_table rtw8821a_rf_a_tbl;
+extern const struct rtw_table rtw8821a_txpwr_lmt_tbl;
+
+extern const struct rtw_pwr_seq_cmd * const card_enable_flow_8821a[];
+extern const struct rtw_pwr_seq_cmd * const enter_lps_flow_8821a[];
+extern const struct rtw_pwr_seq_cmd * const card_disable_flow_8821a[];
+
+extern const struct rtw_pwr_track_tbl rtw8821a_rtw_pwr_track_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821au.c b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
new file mode 100644
index 000000000000..730018773e1c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821au.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "main.h"
+#include "rtw8821a.h"
+#include "usb.h"
+
+static const struct usb_device_id rtw_8821au_id_table[] = {
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x011e, 0xff, 0xff, 0xff),
+	  .driver_info = (kernel_ulong_t)&(rtw8821a_hw_spec) },
+	{},
+};
+MODULE_DEVICE_TABLE(usb, rtw_8821au_id_table);
+
+static struct usb_driver rtw_8821au_driver = {
+	.name = "rtw_8821au",
+	.id_table = rtw_8821au_id_table,
+	.probe = rtw_usb_probe,
+	.disconnect = rtw_usb_disconnect,
+};
+module_usb_driver(rtw_8821au_driver);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8821au/8811au driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 66c79956e8e5..0270225b9c20 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -1581,13 +1581,6 @@ static const struct rtw_intf_phy_para_table phy_para_table_8821c = {
 	.n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8821c),
 };
 
-static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
-	[0] = RTW_DEF_RFE(8821c, 0, 0),
-	[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
-	[4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
-	[6] = RTW_DEF_RFE(8821c, 0, 0),
-};
-
 static const struct rtw_hw_reg rtw8821c_dig[] = {
 	[0] = { .addr = 0xc50, .mask = 0x7f },
 };
@@ -1643,6 +1636,8 @@ static const struct rtw_prioq_addrs prioq_addrs_8821c = {
 };
 
 static const struct rtw_chip_ops rtw8821c_ops = {
+	.power_on = rtw_power_on,
+	.power_off = rtw_power_off,
 	.phy_set_param = rtw8821c_phy_set_param,
 	.read_efuse = rtw8821c_read_efuse,
 	.query_phy_status = query_phy_status,
@@ -1897,7 +1892,7 @@ static const u8 rtw8821c_pwrtrk_2g_cck_a_p[] = {
 	5, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9
 };
 
-static const struct rtw_pwr_track_tbl rtw8821c_rtw_pwr_track_tbl = {
+static const struct rtw_pwr_track_tbl rtw8821c_pwr_track_type0_tbl = {
 	.pwrtrk_5gb_n[0] = rtw8821c_pwrtrk_5gb_n[0],
 	.pwrtrk_5gb_n[1] = rtw8821c_pwrtrk_5gb_n[1],
 	.pwrtrk_5gb_n[2] = rtw8821c_pwrtrk_5gb_n[2],
@@ -1920,6 +1915,13 @@ static const struct rtw_pwr_track_tbl rtw8821c_rtw_pwr_track_tbl = {
 	.pwrtrk_2g_ccka_p = rtw8821c_pwrtrk_2g_cck_a_p,
 };
 
+static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
+	[0] = RTW_DEF_RFE(8821c, 0, 0, 0),
+	[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 0, 2),
+	[4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 0, 2),
+	[6] = RTW_DEF_RFE(8821c, 0, 0, 0),
+};
+
 static const struct rtw_reg_domain coex_info_hw_regs_8821c[] = {
 	{0xCB0, MASKDWORD, RTW_REG_DOMAIN_MAC32},
 	{0xCB4, MASKDWORD, RTW_REG_DOMAIN_MAC32},
@@ -1968,6 +1970,9 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
 	.page_size = TX_PAGE_SIZE,
 	.dig_min = 0x1c,
 	.usb_tx_agg_desc_num = 3,
+	.hw_feature_report = true,
+	.c2h_ra_report_size = 7,
+	.old_datarate_fb_limit = false,
 	.ht_supported = true,
 	.vht_supported = true,
 	.lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
@@ -1989,7 +1994,6 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
 	.rfe_defs = rtw8821c_rfe_defs,
 	.rfe_defs_size = ARRAY_SIZE(rtw8821c_rfe_defs),
 	.rx_ldpc = false,
-	.pwr_track_tbl = &rtw8821c_rtw_pwr_track_tbl,
 	.iqk_threshold = 8,
 	.bfer_su_max_num = 2,
 	.bfer_mu_max_num = 1,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
index 91ed921407bb..7a33ebd612ed 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
@@ -214,19 +214,10 @@ extern const struct rtw_chip_info rtw8821c_hw_spec;
 #define BIT_FEN_EN BIT(26)
 #define REG_INIRTS_RATE_SEL 0x0480
 #define REG_HTSTFWT 0x800
-#define REG_RXPSEL 0x808
-#define BIT_RX_PSEL_RST (BIT(28) | BIT(29)) -#define REG_TXPSEL 0x80c #define REG_RXCCAMSK 0x814 -#define REG_CCASEL 0x82c -#define REG_PDMFTH 0x830 -#define REG_CCA2ND 0x838 #define REG_L1WT 0x83c #define REG_L1PKWT 0x840 #define REG_MRC 0x850 -#define REG_CLKTRK 0x860 -#define REG_ADCCLK 0x8ac -#define REG_ADC160 0x8c4 #define REG_ADC40 0x8c8 #define REG_CHFIR 0x8f0 #define REG_CDDTXP 0x93c @@ -234,14 +225,11 @@ extern const struct rtw_chip_info rtw8821c_hw_spec; #define REG_ACBB0 0x948 #define REG_ACBBRXFIR 0x94c #define REG_ACGG2TBL 0x958 -#define REG_FAS 0x9a4 -#define REG_RXSB 0xa00 #define REG_ADCINI 0xa04 #define REG_PWRTH 0xa08 #define REG_CCA_FLTR 0xa20 #define REG_TXSF2 0xa24 #define REG_TXSF6 0xa28 -#define REG_FA_CCK 0xa5c #define REG_RXDESC 0xa2c #define REG_ENTXCCK 0xa80 #define BTG_LNA 0xfc84 @@ -252,12 +240,8 @@ extern const struct rtw_chip_info rtw8821c_hw_spec; #define REG_PWRTH2 0xaa8 #define REG_CSRATIO 0xaaa #define REG_TXFILTER 0xaac -#define REG_CNTRST 0xb58 #define REG_AGCTR_A 0xc08 -#define REG_TXSCALE_A 0xc1c #define REG_TXDFIR 0xc20 -#define REG_RXIGI_A 0xc50 -#define REG_TXAGCIDX 0xc94 #define REG_TRSW 0xca0 #define REG_RFESEL0 0xcb0 #define REG_RFESEL8 0xcb4 @@ -269,14 +253,6 @@ extern const struct rtw_chip_info rtw8821c_hw_spec; #define B_WLA_SWITCH BIT(23) #define REG_RFEINV 0xcbc #define REG_AGCTR_B 0xe08 -#define REG_RXIGI_B 0xe50 -#define REG_CRC_CCK 0xf04 -#define REG_CRC_OFDM 0xf14 -#define REG_CRC_HT 0xf10 -#define REG_CRC_VHT 0xf0c -#define REG_CCA_OFDM 0xf08 -#define REG_FA_OFDM 0xf48 -#define REG_CCA_CCK 0xfcc #define REG_DMEM_CTRL 0x1080 #define BIT_WL_RST BIT(16) #define REG_ANTWT 0x1904 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 24f76a36f23e..739809f4cab5 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -2072,12 +2072,6 @@ static const struct rtw_intf_phy_para_table phy_para_table_8822b = { .n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8822b), }; -static const struct rtw_rfe_def rtw8822b_rfe_defs[] = { - [2] = RTW_DEF_RFE(8822b, 2, 2), - [3] = RTW_DEF_RFE(8822b, 3, 0), - [5] = RTW_DEF_RFE(8822b, 5, 5), -}; - static const struct rtw_hw_reg rtw8822b_dig[] = { [0] = { .addr = 0xc50, .mask = 0x7f }, [1] = { .addr = 0xe50, .mask = 0x7f }, @@ -2132,6 +2126,8 @@ static const struct rtw_prioq_addrs prioq_addrs_8822b = { }; static const struct rtw_chip_ops rtw8822b_ops = { + .power_on = rtw_power_on, + .power_off = rtw_power_off, .phy_set_param = rtw8822b_phy_set_param, .read_efuse = rtw8822b_read_efuse, .query_phy_status = query_phy_status, @@ -2430,7 +2426,7 @@ static const u8 rtw8822b_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = { 10, 11, 11, 12, 12, 13, 13, 14, 14, 15 }; -static const struct rtw_pwr_track_tbl rtw8822b_rtw_pwr_track_tbl = { +static const struct rtw_pwr_track_tbl rtw8822b_pwr_track_type0_tbl = { .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1], .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2], .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8822b_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3], @@ -2453,6 +2449,12 @@ static const struct rtw_pwr_track_tbl rtw8822b_rtw_pwr_track_tbl = { .pwrtrk_2g_ccka_p = rtw8822b_pwrtrk_2g_cck_a_p, }; +static const struct rtw_rfe_def rtw8822b_rfe_defs[] = { + [2] = RTW_DEF_RFE(8822b, 2, 2, 0), + [3] = RTW_DEF_RFE(8822b, 3, 0, 0), + [5] = RTW_DEF_RFE(8822b, 5, 5, 0), +}; + static const struct rtw_reg_domain coex_info_hw_regs_8822b[] = { {0xcb0, MASKDWORD, 
RTW_REG_DOMAIN_MAC32}, {0xcb4, MASKDWORD, RTW_REG_DOMAIN_MAC32}, @@ -2509,6 +2511,9 @@ const struct rtw_chip_info rtw8822b_hw_spec = { .page_size = TX_PAGE_SIZE, .dig_min = 0x1c, .usb_tx_agg_desc_num = 3, + .hw_feature_report = true, + .c2h_ra_report_size = 7, + .old_datarate_fb_limit = false, .ht_supported = true, .vht_supported = true, .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK), @@ -2530,7 +2535,6 @@ const struct rtw_chip_info rtw8822b_hw_spec = { .rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl}, .rfe_defs = rtw8822b_rfe_defs, .rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs), - .pwr_track_tbl = &rtw8822b_rtw_pwr_track_tbl, .iqk_threshold = 8, .bfer_su_max_num = 2, .bfer_mu_max_num = 1, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h index cf85e63966a1..0514958fb57c 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h @@ -151,21 +151,12 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define RTW8822B_EDCCA_MAX 0x7f #define RTW8822B_EDCCA_SRC_DEF 1 #define REG_HTSTFWT 0x800 -#define REG_RXPSEL 0x808 -#define BIT_RX_PSEL_RST (BIT(28) | BIT(29)) -#define REG_TXPSEL 0x80c #define REG_RXCCAMSK 0x814 -#define REG_CCASEL 0x82c -#define REG_PDMFTH 0x830 -#define REG_CCA2ND 0x838 #define REG_L1WT 0x83c #define REG_L1PKWT 0x840 #define REG_MRC 0x850 -#define REG_CLKTRK 0x860 #define REG_EDCCA_POW_MA 0x8a0 #define BIT_MA_LEVEL GENMASK(1, 0) -#define REG_ADCCLK 0x8ac -#define REG_ADC160 0x8c4 #define REG_ADC40 0x8c8 #define REG_EDCCA_DECISION 0x8dc #define BIT_EDCCA_OPTION BIT(5) @@ -176,7 +167,6 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_ACBB0 0x948 #define REG_ACBBRXFIR 0x94c #define REG_ACGG2TBL 0x958 -#define REG_RXSB 0xa00 #define REG_ADCINI 0xa04 #define REG_TXSF2 0xa24 #define REG_TXSF6 0xa28 @@ -184,14 +174,12 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_ENTXCCK 0xa80 #define REG_AGCTR_A 0xc08 #define REG_TXDFIR 0xc20 -#define REG_RXIGI_A 0xc50 #define REG_TRSW 0xca0 #define REG_RFESEL0 0xcb0 #define REG_RFESEL8 0xcb4 #define REG_RFECTL 0xcb8 #define REG_RFEINV 0xcbc #define REG_AGCTR_B 0xe08 -#define REG_RXIGI_B 0xe50 #define REG_ANTWT 0x1904 #define REG_IQKFAILMSK 0x1bf0 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index da74e66bda84..af6b76937f1d 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -4883,16 +4883,6 @@ static const struct rtw_intf_phy_para_table phy_para_table_8822c = { .n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8822c), }; -static const struct rtw_rfe_def rtw8822c_rfe_defs[] = { - [0] = RTW_DEF_RFE(8822c, 0, 0), - [1] = RTW_DEF_RFE(8822c, 0, 0), - [2] = RTW_DEF_RFE(8822c, 0, 0), - [3] = RTW_DEF_RFE(8822c, 0, 0), - [4] = RTW_DEF_RFE(8822c, 0, 0), - [5] = RTW_DEF_RFE(8822c, 0, 5), - [6] = RTW_DEF_RFE(8822c, 0, 0), -}; - static const struct rtw_hw_reg rtw8822c_dig[] = { [0] = { .addr = 0x1d70, .mask = 0x7f }, [1] = { .addr = 0x1d70, .mask = 0x7f00 }, @@ -4947,6 +4937,8 @@ static const struct rtw_prioq_addrs prioq_addrs_8822c = { }; static const struct rtw_chip_ops rtw8822c_ops = { + .power_on = rtw_power_on, + .power_off = rtw_power_off, .phy_set_param = rtw8822c_phy_set_param, .read_efuse = rtw8822c_read_efuse, .query_phy_status = query_phy_status, @@ -5236,7 +5228,7 @@ static const u8 
rtw8822c_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = { 18, 18, 19, 20, 21, 22, 23, 24, 24, 25 }; -static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = { +static const struct rtw_pwr_track_tbl rtw8822c_pwr_track_type0_tbl = { .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1], .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2], .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8822c_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3], @@ -5259,6 +5251,16 @@ static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = { .pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p, }; +static const struct rtw_rfe_def rtw8822c_rfe_defs[] = { + [0] = RTW_DEF_RFE(8822c, 0, 0, 0), + [1] = RTW_DEF_RFE(8822c, 0, 0, 0), + [2] = RTW_DEF_RFE(8822c, 0, 0, 0), + [3] = RTW_DEF_RFE(8822c, 0, 0, 0), + [4] = RTW_DEF_RFE(8822c, 0, 0, 0), + [5] = RTW_DEF_RFE(8822c, 0, 5, 0), + [6] = RTW_DEF_RFE(8822c, 0, 0, 0), +}; + static const struct rtw_hw_reg_offset rtw8822c_edcca_th[] = { [EDCCA_TH_L2H_IDX] = { {.addr = 0x84c, .mask = MASKBYTE2}, .offset = 0x80 @@ -5329,6 +5331,9 @@ const struct rtw_chip_info rtw8822c_hw_spec = { .page_size = TX_PAGE_SIZE, .dig_min = 0x20, .usb_tx_agg_desc_num = 3, + .hw_feature_report = true, + .c2h_ra_report_size = 7, + .old_datarate_fb_limit = false, .default_1ss_tx_path = BB_PATH_A, .path_div_supported = true, .ht_supported = true, @@ -5355,7 +5360,6 @@ const struct rtw_chip_info rtw8822c_hw_spec = { .rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs), .en_dis_dpd = true, .dpd_ratemask = DIS_DPD_RATEALL, - .pwr_track_tbl = &rtw8822c_rtw_pwr_track_tbl, .iqk_threshold = 8, .lck_threshold = 8, .bfer_su_max_num = 2, diff --git a/drivers/net/wireless/realtek/rtw88/rtw88xxa.c b/drivers/net/wireless/realtek/rtw88/rtw88xxa.c new file mode 100644 index 000000000000..71e61b9c0bec --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw88xxa.c @@ -0,0 +1,1989 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include <linux/usb.h> +#include "main.h" +#include "coex.h" +#include "phy.h" +#include "rtw88xxa.h" +#include "mac.h" +#include "reg.h" +#include "sec.h" +#include "debug.h" +#include "bf.h" +#include "efuse.h" +#include "usb.h" + +void rtw88xxa_efuse_grant(struct rtw_dev *rtwdev, bool on) +{ + if (on) { + rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON); + + rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR); + rtw_write16_set(rtwdev, REG_SYS_CLKR, + BIT_LOADER_CLK_EN | BIT_ANA8M); + } else { + rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF); + } +} +EXPORT_SYMBOL(rtw88xxa_efuse_grant); + +static void rtw8812a_read_amplifier_type(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + + efuse->ext_pa_2g = (efuse->pa_type_2g & BIT(5)) && + (efuse->pa_type_2g & BIT(4)); + efuse->ext_lna_2g = (efuse->lna_type_2g & BIT(7)) && + (efuse->lna_type_2g & BIT(3)); + + efuse->ext_pa_5g = (efuse->pa_type_5g & BIT(1)) && + (efuse->pa_type_5g & BIT(0)); + efuse->ext_lna_5g = (efuse->lna_type_5g & BIT(7)) && + (efuse->lna_type_5g & BIT(3)); + + /* For rtw_phy_cond2: */ + if (efuse->ext_pa_2g) { + u8 ext_type_pa_2g_a = u8_get_bits(efuse->lna_type_2g, BIT(2)); + u8 ext_type_pa_2g_b = u8_get_bits(efuse->lna_type_2g, BIT(6)); + + efuse->gpa_type = (ext_type_pa_2g_b << 2) | ext_type_pa_2g_a; + } + + if (efuse->ext_pa_5g) { + u8 ext_type_pa_5g_a = u8_get_bits(efuse->lna_type_5g, BIT(2)); + u8 ext_type_pa_5g_b = u8_get_bits(efuse->lna_type_5g, BIT(6)); + + efuse->apa_type = (ext_type_pa_5g_b << 2) | 
ext_type_pa_5g_a; + } + + if (efuse->ext_lna_2g) { + u8 ext_type_lna_2g_a = u8_get_bits(efuse->lna_type_2g, + BIT(1) | BIT(0)); + u8 ext_type_lna_2g_b = u8_get_bits(efuse->lna_type_2g, + BIT(5) | BIT(4)); + + efuse->glna_type = (ext_type_lna_2g_b << 2) | ext_type_lna_2g_a; + } + + if (efuse->ext_lna_5g) { + u8 ext_type_lna_5g_a = u8_get_bits(efuse->lna_type_5g, + BIT(1) | BIT(0)); + u8 ext_type_lna_5g_b = u8_get_bits(efuse->lna_type_5g, + BIT(5) | BIT(4)); + + efuse->alna_type = (ext_type_lna_5g_b << 2) | ext_type_lna_5g_a; + } +} + +static void rtw8812a_read_rfe_type(struct rtw_dev *rtwdev, + struct rtw88xxa_efuse *map) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + + if (map->rfe_option == 0xff) { + if (rtwdev->hci.type == RTW_HCI_TYPE_USB) + efuse->rfe_option = 0; + else if (rtwdev->hci.type == RTW_HCI_TYPE_PCIE) + efuse->rfe_option = 2; + else + efuse->rfe_option = 4; + } else if (map->rfe_option & BIT(7)) { + if (efuse->ext_lna_5g) { + if (efuse->ext_pa_5g) { + if (efuse->ext_lna_2g && efuse->ext_pa_2g) + efuse->rfe_option = 3; + else + efuse->rfe_option = 0; + } else { + efuse->rfe_option = 2; + } + } else { + efuse->rfe_option = 4; + } + } else { + efuse->rfe_option = map->rfe_option & 0x3f; + + /* Due to other customer already use incorrect EFUSE map for + * their product. We need to add workaround to prevent to + * modify spec and notify all customer to revise the IC 0xca + * content. + */ + if (efuse->rfe_option == 4 && + (efuse->ext_pa_5g || efuse->ext_pa_2g || + efuse->ext_lna_5g || efuse->ext_lna_2g)) { + if (rtwdev->hci.type == RTW_HCI_TYPE_USB) + efuse->rfe_option = 0; + else if (rtwdev->hci.type == RTW_HCI_TYPE_PCIE) + efuse->rfe_option = 2; + } + } +} + +static void rtw88xxa_read_usb_type(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_hal *hal = &rtwdev->hal; + u8 antenna = 0; + u8 wmode = 0; + u8 val8, i; + + efuse->hw_cap.bw = BIT(RTW_CHANNEL_WIDTH_20) | + BIT(RTW_CHANNEL_WIDTH_40) | + BIT(RTW_CHANNEL_WIDTH_80); + efuse->hw_cap.ptcl = EFUSE_HW_CAP_PTCL_VHT; + + if (rtwdev->chip->id == RTW_CHIP_TYPE_8821A) + efuse->hw_cap.nss = 1; + else + efuse->hw_cap.nss = 2; + + if (rtwdev->chip->id == RTW_CHIP_TYPE_8821A) + goto print_hw_cap; + + for (i = 0; i < 2; i++) { + rtw_read8_physical_efuse(rtwdev, 1019 - i, &val8); + + antenna = u8_get_bits(val8, GENMASK(7, 5)); + if (antenna) + break; + antenna = u8_get_bits(val8, GENMASK(3, 1)); + if (antenna) + break; + } + + for (i = 0; i < 2; i++) { + rtw_read8_physical_efuse(rtwdev, 1021 - i, &val8); + + wmode = u8_get_bits(val8, GENMASK(3, 2)); + if (wmode) + break; + } + + if (antenna == 1) { + rtw_info(rtwdev, "This RTL8812AU says it is 1T1R.\n"); + + efuse->hw_cap.nss = 1; + hal->rf_type = RF_1T1R; + hal->rf_path_num = 1; + hal->rf_phy_num = 1; + hal->antenna_tx = BB_PATH_A; + hal->antenna_rx = BB_PATH_A; + } else { + /* Override rtw_chip_parameter_setup(). It detects 8812au as 1T1R. */ + efuse->hw_cap.nss = 2; + hal->rf_type = RF_2T2R; + hal->rf_path_num = 2; + hal->rf_phy_num = 2; + hal->antenna_tx = BB_PATH_AB; + hal->antenna_rx = BB_PATH_AB; + + if (antenna == 2 && wmode == 2) { + rtw_info(rtwdev, "This RTL8812AU says it can't do VHT.\n"); + + /* Can't be EFUSE_HW_CAP_IGNORE and can't be + * EFUSE_HW_CAP_PTCL_VHT, so make it 1. 
+ */ + efuse->hw_cap.ptcl = 1; + efuse->hw_cap.bw &= ~BIT(RTW_CHANNEL_WIDTH_80); + } + } + +print_hw_cap: + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n", + efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl, + efuse->hw_cap.ant_num, efuse->hw_cap.nss); +} + +int rtw88xxa_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) +{ + const struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw88xxa_efuse *map; + int i; + + if (chip->id == RTW_CHIP_TYPE_8812A) + rtwdev->hal.cut_version += 1; + + if (rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE)) + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, + log_map, chip->log_efuse_size, true); + + map = (struct rtw88xxa_efuse *)log_map; + + efuse->rf_board_option = map->rf_board_option; + efuse->crystal_cap = map->xtal_k; + if (efuse->crystal_cap == 0xff) + efuse->crystal_cap = 0x20; + efuse->pa_type_2g = map->pa_type; + efuse->pa_type_5g = map->pa_type; + efuse->lna_type_2g = map->lna_type_2g; + efuse->lna_type_5g = map->lna_type_5g; + if (chip->id == RTW_CHIP_TYPE_8812A) { + rtw8812a_read_amplifier_type(rtwdev); + rtw8812a_read_rfe_type(rtwdev, map); + + efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(1)); + } + efuse->channel_plan = map->channel_plan; + efuse->country_code[0] = map->country_code[0]; + efuse->country_code[1] = map->country_code[1]; + efuse->bt_setting = map->rf_bt_setting; + efuse->regd = map->rf_board_option & 0x7; + efuse->thermal_meter[0] = map->thermal_meter; + efuse->thermal_meter[1] = map->thermal_meter; + efuse->thermal_meter_k = map->thermal_meter; + efuse->tx_bb_swing_setting_2g = map->tx_bb_swing_setting_2g; + efuse->tx_bb_swing_setting_5g = map->tx_bb_swing_setting_5g; + + rtw88xxa_read_usb_type(rtwdev); + + if (chip->id == RTW_CHIP_TYPE_8821A) + efuse->btcoex = rtw_read32_mask(rtwdev, REG_WL_BT_PWR_CTRL, + BIT_BT_FUNC_EN); + else + efuse->btcoex = (map->rf_board_option & 0xe0) == 0x20; + efuse->share_ant = !!(efuse->bt_setting & BIT(0)); + + /* No antenna diversity because it's disabled in the vendor driver */ + efuse->ant_div_cfg = 0; + + efuse->ant_div_type = map->rf_antenna_option; + if (efuse->ant_div_type == 0xff) + efuse->ant_div_type = 0x3; + + for (i = 0; i < 4; i++) + efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i]; + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_USB: + if (chip->id == RTW_CHIP_TYPE_8821A) + ether_addr_copy(efuse->addr, map->rtw8821au.mac_addr); + else + ether_addr_copy(efuse->addr, map->rtw8812au.mac_addr); + break; + case RTW_HCI_TYPE_PCIE: + case RTW_HCI_TYPE_SDIO: + default: + /* unsupported now */ + return -EOPNOTSUPP; + } + + return 0; +} +EXPORT_SYMBOL(rtw88xxa_read_efuse); + +static void rtw88xxa_reset_8051(struct rtw_dev *rtwdev) +{ + const struct rtw_chip_info *chip = rtwdev->chip; + u8 val8; + + /* Reset MCU IO Wrapper */ + rtw_write8_clr(rtwdev, REG_RSV_CTRL, BIT(1)); + if (chip->id == RTW_CHIP_TYPE_8812A) + rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT(3)); + else + rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT(0)); + + val8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN + 1); + rtw_write8(rtwdev, REG_SYS_FUNC_EN + 1, val8 & ~BIT(2)); + + /* Enable MCU IO Wrapper */ + rtw_write8_clr(rtwdev, REG_RSV_CTRL, BIT(1)); + if (chip->id == RTW_CHIP_TYPE_8812A) + rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT(3)); + else + rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT(0)); + + rtw_write8(rtwdev, REG_SYS_FUNC_EN + 1, val8 | BIT(2)); +} + +/* A lightweight deinit function */ +static void 
rtw88xxau_hw_reset(struct rtw_dev *rtwdev) +{ + u8 val8; + + if (!(rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_RAM_DL_SEL)) + return; + + rtw88xxa_reset_8051(rtwdev); + rtw_write8(rtwdev, REG_MCUFW_CTRL, 0x00); + + /* before BB reset should do clock gated */ + rtw_write32_set(rtwdev, REG_FPGA0_XCD_RF_PARA, BIT(6)); + + /* reset BB */ + rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN, BIT(0) | BIT(1)); + + /* reset RF */ + rtw_write8(rtwdev, REG_RF_CTRL, 0); + + /* reset TRX path */ + rtw_write16(rtwdev, REG_CR, 0); + + /* reset MAC, reg0x5[1], auto FSM off */ + rtw_write8_set(rtwdev, REG_APS_FSMCO + 1, APS_FSMCO_MAC_OFF >> 8); + + /* check if reg0x5[1] auto cleared */ + if (read_poll_timeout_atomic(rtw_read8, val8, + !(val8 & (APS_FSMCO_MAC_OFF >> 8)), + 1, 5000, false, + rtwdev, REG_APS_FSMCO + 1)) + rtw_err(rtwdev, "%s: timed out waiting for 0x5[1]\n", __func__); + + /* reg0x5[0], auto FSM on */ + val8 |= APS_FSMCO_MAC_ENABLE >> 8; + rtw_write8(rtwdev, REG_APS_FSMCO + 1, val8); + + rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT(4) | BIT(7)); + rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT(4) | BIT(7)); +} + +static int rtw88xxau_init_power_on(struct rtw_dev *rtwdev) +{ + const struct rtw_chip_info *chip = rtwdev->chip; + u16 val16; + int ret; + + ret = rtw_pwr_seq_parser(rtwdev, chip->pwr_on_seq); + if (ret) { + rtw_err(rtwdev, "power on flow failed\n"); + return ret; + } + + rtw_write16(rtwdev, REG_CR, 0); + val16 = BIT_HCI_TXDMA_EN | BIT_HCI_RXDMA_EN | BIT_TXDMA_EN | + BIT_RXDMA_EN | BIT_PROTOCOL_EN | BIT_SCHEDULE_EN | + BIT_MAC_SEC_EN | BIT_32K_CAL_TMR_EN; + rtw_write16_set(rtwdev, REG_CR, val16); + + if (chip->id == RTW_CHIP_TYPE_8821A) { + if (rtw_read8(rtwdev, REG_SYS_CFG1 + 3) & BIT(0)) + rtw_write8_set(rtwdev, REG_LDO_SWR_CTRL, BIT(6)); + } + + return ret; +} + +static int rtw88xxa_llt_write(struct rtw_dev *rtwdev, u32 address, u32 data) +{ + u32 value = BIT_LLT_WRITE_ACCESS | (address << 8) | data; + int count = 0; + + rtw_write32(rtwdev, REG_LLT_INIT, value); + + do { + if (!rtw_read32_mask(rtwdev, REG_LLT_INIT, BIT(31) | BIT(30))) + break; + + if (count > 20) { + rtw_err(rtwdev, "Failed to poll write LLT done at %d!\n", + address); + return -EBUSY; + } + } while (++count); + + return 0; +} + +static int rtw88xxa_llt_init(struct rtw_dev *rtwdev, u32 boundary) +{ + u32 last_entry = 255; + int status = 0; + u32 i; + + for (i = 0; i < boundary - 1; i++) { + status = rtw88xxa_llt_write(rtwdev, i, i + 1); + if (status) + return status; + } + + status = rtw88xxa_llt_write(rtwdev, boundary - 1, 0xFF); + if (status) + return status; + + for (i = boundary; i < last_entry; i++) { + status = rtw88xxa_llt_write(rtwdev, i, i + 1); + if (status) + return status; + } + + status = rtw88xxa_llt_write(rtwdev, last_entry, boundary); + + return status; +} + +static void rtw88xxau_init_queue_reserved_page(struct rtw_dev *rtwdev) +{ + const struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + const struct rtw_page_table *pg_tbl = NULL; + u16 pubq_num; + u32 val32; + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + pg_tbl = &chip->page_table[1]; + break; + case RTW_HCI_TYPE_USB: + if (rtwdev->hci.bulkout_num == 2) + pg_tbl = &chip->page_table[2]; + else if (rtwdev->hci.bulkout_num == 3) + pg_tbl = &chip->page_table[3]; + else if (rtwdev->hci.bulkout_num == 4) + pg_tbl = &chip->page_table[4]; + break; + case RTW_HCI_TYPE_SDIO: + pg_tbl = &chip->page_table[0]; + break; + default: + break; + } + + pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num - + 
pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num; + + val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num); + rtw_write32(rtwdev, REG_RQPN_NPQ, val32); + + val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num); + rtw_write32(rtwdev, REG_RQPN, val32); +} + +static void rtw88xxau_init_tx_buffer_boundary(struct rtw_dev *rtwdev) +{ + struct rtw_fifo_conf *fifo = &rtwdev->fifo; + + rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary); + rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary); + rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary); + rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary); + rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary); +} + +static int rtw88xxau_init_queue_priority(struct rtw_dev *rtwdev) +{ + const struct rtw_chip_info *chip = rtwdev->chip; + u8 bulkout_num = rtwdev->hci.bulkout_num; + const struct rtw_rqpn *rqpn = NULL; + u16 txdma_pq_map; + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + rqpn = &chip->rqpn_table[1]; + break; + case RTW_HCI_TYPE_USB: + if (bulkout_num == 2) + rqpn = &chip->rqpn_table[2]; + else if (bulkout_num == 3) + rqpn = &chip->rqpn_table[3]; + else if (bulkout_num == 4) + rqpn = &chip->rqpn_table[4]; + else + return -EINVAL; + break; + case RTW_HCI_TYPE_SDIO: + rqpn = &chip->rqpn_table[0]; + break; + default: + return -EINVAL; + } + + rtwdev->fifo.rqpn = rqpn; + + txdma_pq_map = rtw_read16(rtwdev, REG_TXDMA_PQ_MAP) & 0x7; + txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi); + txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg); + txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk); + txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be); + txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi); + txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo); + rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map); + + /* Packet in Hi Queue Tx immediately (No constraint for ATIM Period). 
*/ + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB && bulkout_num == 4) + rtw_write8(rtwdev, REG_HIQ_NO_LMT_EN, 0xff); + + return 0; +} + +static void rtw88xxa_init_wmac_setting(struct rtw_dev *rtwdev) +{ + rtw_write16(rtwdev, REG_RXFLTMAP0, 0xffff); + rtw_write16(rtwdev, REG_RXFLTMAP1, 0x0400); + rtw_write16(rtwdev, REG_RXFLTMAP2, 0xffff); + + rtw_write32(rtwdev, REG_MAR, 0xffffffff); + rtw_write32(rtwdev, REG_MAR + 4, 0xffffffff); +} + +static void rtw88xxa_init_adaptive_ctrl(struct rtw_dev *rtwdev) +{ + rtw_write32_mask(rtwdev, REG_RRSR, 0xfffff, 0xffff1); + rtw_write16(rtwdev, REG_RETRY_LIMIT, 0x3030); +} + +static void rtw88xxa_init_edca(struct rtw_dev *rtwdev) +{ + rtw_write16(rtwdev, REG_SPEC_SIFS, 0x100a); + rtw_write16(rtwdev, REG_MAC_SPEC_SIFS, 0x100a); + + rtw_write16(rtwdev, REG_SIFS, 0x100a); + rtw_write16(rtwdev, REG_SIFS + 2, 0x100a); + + rtw_write32(rtwdev, REG_EDCA_BE_PARAM, 0x005EA42B); + rtw_write32(rtwdev, REG_EDCA_BK_PARAM, 0x0000A44F); + rtw_write32(rtwdev, REG_EDCA_VI_PARAM, 0x005EA324); + rtw_write32(rtwdev, REG_EDCA_VO_PARAM, 0x002FA226); + + rtw_write8(rtwdev, REG_USTIME_TSF, 0x50); + rtw_write8(rtwdev, REG_USTIME_EDCA, 0x50); +} + +static void rtw88xxau_tx_aggregation(struct rtw_dev *rtwdev) +{ + const struct rtw_chip_info *chip = rtwdev->chip; + + rtw_write32_mask(rtwdev, REG_DWBCN0_CTRL, 0xf0, + chip->usb_tx_agg_desc_num); + + if (chip->id == RTW_CHIP_TYPE_8821A) + rtw_write8(rtwdev, REG_DWBCN1_CTRL, + chip->usb_tx_agg_desc_num << 1); +} + +static void rtw88xxa_init_beacon_parameters(struct rtw_dev *rtwdev) +{ + u16 val16; + + val16 = (BIT_DIS_TSF_UDT << 8) | BIT_DIS_TSF_UDT; + if (rtwdev->efuse.btcoex) + val16 |= BIT_EN_BCN_FUNCTION; + rtw_write16(rtwdev, REG_BCN_CTRL, val16); + + rtw_write32_mask(rtwdev, REG_TBTT_PROHIBIT, 0xfffff, WLAN_TBTT_TIME); + rtw_write8(rtwdev, REG_DRVERLYINT, 0x05); + rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME); + rtw_write16(rtwdev, REG_BCNTCFG, 0x4413); +} + +static void rtw88xxa_phy_bb_config(struct rtw_dev *rtwdev) +{ + u8 val8, crystal_cap; + + /* power on BB/RF domain */ + val8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN); + val8 |= BIT_FEN_USBA; + rtw_write8(rtwdev, REG_SYS_FUNC_EN, val8); + + /* toggle BB reset */ + val8 |= BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST; + rtw_write8(rtwdev, REG_SYS_FUNC_EN, val8); + + rtw_write8(rtwdev, REG_RF_CTRL, + BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB); + rtw_write8(rtwdev, REG_RF_B_CTRL, + BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB); + + rtw_load_table(rtwdev, rtwdev->chip->bb_tbl); + rtw_load_table(rtwdev, rtwdev->chip->agc_tbl); + + crystal_cap = rtwdev->efuse.crystal_cap & 0x3F; + if (rtwdev->chip->id == RTW_CHIP_TYPE_8812A) + rtw_write32_mask(rtwdev, REG_AFE_CTRL3, 0x7FF80000, + crystal_cap | (crystal_cap << 6)); + else + rtw_write32_mask(rtwdev, REG_AFE_CTRL3, 0x00FFF000, + crystal_cap | (crystal_cap << 6)); +} + +static void rtw88xxa_phy_rf_config(struct rtw_dev *rtwdev) +{ + u8 rf_path; + + for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) + rtw_load_table(rtwdev, rtwdev->chip->rf_tbl[rf_path]); +} + +static void rtw8812a_config_1t(struct rtw_dev *rtwdev) +{ + /* BB OFDM RX Path_A */ + rtw_write32_mask(rtwdev, REG_RXPSEL, 0xff, 0x11); + + /* BB OFDM TX Path_A */ + rtw_write32_mask(rtwdev, REG_TXPSEL, MASKLWORD, 0x1111); + + /* BB CCK R/Rx Path_A */ + rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0c000000, 0x0); + + /* MCS support */ + rtw_write32_mask(rtwdev, REG_RX_MCS_LIMIT, 0xc0000060, 0x4); + + /* RF Path_B HSSI OFF */ + rtw_write32_mask(rtwdev, REG_3WIRE_SWB, 0xf, 0x4); + + 
/* RF Path_B Power Down */ + rtw_write32_mask(rtwdev, REG_LSSI_WRITE_B, MASKDWORD, 0); + + /* ADDA Path_B OFF */ + rtw_write32_mask(rtwdev, REG_AFE_PWR1_B, MASKDWORD, 0); + rtw_write32_mask(rtwdev, REG_AFE_PWR2_B, MASKDWORD, 0); +} + +static const u32 rtw88xxa_txscale_tbl[] = { + 0x081, 0x088, 0x090, 0x099, 0x0a2, 0x0ac, 0x0b6, 0x0c0, 0x0cc, 0x0d8, + 0x0e5, 0x0f2, 0x101, 0x110, 0x120, 0x131, 0x143, 0x156, 0x16a, 0x180, + 0x197, 0x1af, 0x1c8, 0x1e3, 0x200, 0x21e, 0x23e, 0x261, 0x285, 0x2ab, + 0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe +}; + +static u32 rtw88xxa_get_bb_swing(struct rtw_dev *rtwdev, u8 band, u8 path) +{ + static const u32 swing2setting[4] = {0x200, 0x16a, 0x101, 0x0b6}; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 tx_bb_swing; + + if (band == RTW_BAND_2G) + tx_bb_swing = efuse->tx_bb_swing_setting_2g; + else + tx_bb_swing = efuse->tx_bb_swing_setting_5g; + + if (path == RF_PATH_B) + tx_bb_swing >>= 2; + tx_bb_swing &= 0x3; + + return swing2setting[tx_bb_swing]; +} + +static u8 rtw88xxa_get_swing_index(struct rtw_dev *rtwdev) +{ + u32 swing, table_value; + u8 i; + + swing = rtw88xxa_get_bb_swing(rtwdev, rtwdev->hal.current_band_type, + RF_PATH_A); + + for (i = 0; i < ARRAY_SIZE(rtw88xxa_txscale_tbl); i++) { + table_value = rtw88xxa_txscale_tbl[i]; + if (swing == table_value) + return i; + } + + return 24; +} + +static void rtw88xxa_pwrtrack_init(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 path; + + dm_info->default_ofdm_index = rtw88xxa_get_swing_index(rtwdev); + + if (rtwdev->chip->id == RTW_CHIP_TYPE_8821A) + dm_info->default_cck_index = 0; + else + dm_info->default_cck_index = 24; + + for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) { + ewma_thermal_init(&dm_info->avg_thermal[path]); + dm_info->delta_power_index[path] = 0; + dm_info->delta_power_index_last[path] = 0; + } + + dm_info->pwr_trk_triggered = false; + dm_info->pwr_trk_init_trigger = true; + dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; +} + +void rtw88xxa_power_off(struct rtw_dev *rtwdev, + const struct rtw_pwr_seq_cmd *const *enter_lps_flow) +{ + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); + enum usb_device_speed speed = rtwusb->udev->speed; + u16 ori_fsmc0; + u8 reg_cr; + + reg_cr = rtw_read8(rtwdev, REG_CR); + + /* Already powered off */ + if (reg_cr == 0 || reg_cr == 0xEA) + return; + + rtw_hci_stop(rtwdev); + + if (!rtwdev->efuse.btcoex) + rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC); + + /* set Reg 0xf008[3:4] to 2'11 to enable U1/U2 Mode in USB3.0. */ + if (speed == USB_SPEED_SUPER) + rtw_write8_set(rtwdev, REG_USB_MOD, 0x18); + + rtw_write32(rtwdev, REG_HISR0, 0xffffffff); + rtw_write32(rtwdev, REG_HISR1, 0xffffffff); + rtw_write32(rtwdev, REG_HIMR0, 0); + rtw_write32(rtwdev, REG_HIMR1, 0); + + if (rtwdev->efuse.btcoex) + rtw_coex_power_off_setting(rtwdev); + + ori_fsmc0 = rtw_read16(rtwdev, REG_APS_FSMCO); + rtw_write16(rtwdev, REG_APS_FSMCO, ori_fsmc0 & ~APS_FSMCO_HW_POWERDOWN); + + /* Stop Tx Report Timer. 
*/ + rtw_write8_clr(rtwdev, REG_TX_RPT_CTRL, BIT(1)); + + /* Stop Rx */ + rtw_write8(rtwdev, REG_CR, 0); + + rtw_pwr_seq_parser(rtwdev, enter_lps_flow); + + if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_RAM_DL_SEL) + rtw88xxa_reset_8051(rtwdev); + + rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT(2)); + rtw_write8(rtwdev, REG_MCUFW_CTRL, 0); + + rtw_pwr_seq_parser(rtwdev, rtwdev->chip->pwr_off_seq); + + if (ori_fsmc0 & APS_FSMCO_HW_POWERDOWN) + rtw_write16_set(rtwdev, REG_APS_FSMCO, APS_FSMCO_HW_POWERDOWN); + + clear_bit(RTW_FLAG_POWERON, rtwdev->flags); +} +EXPORT_SYMBOL(rtw88xxa_power_off); + +static void rtw88xxa_set_channel_bb_swing(struct rtw_dev *rtwdev, u8 band) +{ + rtw_write32_mask(rtwdev, REG_TXSCALE_A, BB_SWING_MASK, + rtw88xxa_get_bb_swing(rtwdev, band, RF_PATH_A)); + rtw_write32_mask(rtwdev, REG_TXSCALE_B, BB_SWING_MASK, + rtw88xxa_get_bb_swing(rtwdev, band, RF_PATH_B)); + rtw88xxa_pwrtrack_init(rtwdev); +} + +static void rtw8821a_set_ext_band_switch(struct rtw_dev *rtwdev, u8 band) +{ + rtw_write32_mask(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN, 0); + rtw_write32_mask(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL, 1); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, 0xf, 7); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, 0xf0, 7); + + if (band == RTW_BAND_2G) + rtw_write32_mask(rtwdev, REG_RFE_INV_A, BIT(29) | BIT(28), 1); + else + rtw_write32_mask(rtwdev, REG_RFE_INV_A, BIT(29) | BIT(28), 2); +} + +static void rtw8821a_phy_set_rfe_reg_24g(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + + /* Turn off RF PA and LNA */ + + /* 0xCB0[15:12] = 0x7 (LNA_On)*/ + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, 0xF000, 0x7); + /* 0xCB0[7:4] = 0x7 (PAPE_A)*/ + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, 0xF0, 0x7); + + if (efuse->ext_lna_2g) { + /* Turn on 2.4G External LNA */ + rtw_write32_mask(rtwdev, REG_RFE_INV_A, BIT(20), 1); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, BIT(22), 0); + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, GENMASK(2, 0), 0x2); + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, GENMASK(10, 8), 0x2); + } else { + /* Bypass 2.4G External LNA */ + rtw_write32_mask(rtwdev, REG_RFE_INV_A, BIT(20), 0); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, BIT(22), 0); + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, GENMASK(2, 0), 0x7); + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, GENMASK(10, 8), 0x7); + } +} + +static void rtw8821a_phy_set_rfe_reg_5g(struct rtw_dev *rtwdev) +{ + /* Turn ON RF PA and LNA */ + + /* 0xCB0[15:12] = 0x7 (LNA_On)*/ + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, 0xF000, 0x5); + /* 0xCB0[7:4] = 0x7 (PAPE_A)*/ + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, 0xF0, 0x4); + + /* Bypass 2.4G External LNA */ + rtw_write32_mask(rtwdev, REG_RFE_INV_A, BIT(20), 0); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, BIT(22), 0); + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, GENMASK(2, 0), 0x7); + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, GENMASK(10, 8), 0x7); +} + +static void rtw8812a_phy_set_rfe_reg_24g(struct rtw_dev *rtwdev) +{ + switch (rtwdev->efuse.rfe_option) { + case 0: + case 2: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, RFE_INV_MASK, 0x000); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x000); + break; + case 1: + if (rtwdev->efuse.btcoex) { + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, 0xffffff, 0x777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, 0x33f00000, 0x000); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, 
RFE_INV_MASK, 0x000); + } else { + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, RFE_INV_MASK, 0x000); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x000); + } + break; + case 3: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x54337770); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x54337770); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, RFE_INV_MASK, 0x010); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x010); + rtw_write32_mask(rtwdev, REG_ANTSEL_SW, 0x00000303, 0x1); + break; + case 4: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, RFE_INV_MASK, 0x001); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x001); + break; + case 5: + rtw_write8(rtwdev, REG_RFE_PINMUX_A + 2, 0x77); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777); + rtw_write8_clr(rtwdev, REG_RFE_INV_A + 3, BIT(0)); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x000); + break; + case 6: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x07772770); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x07772770); + rtw_write32(rtwdev, REG_RFE_INV_A, 0x00000077); + rtw_write32(rtwdev, REG_RFE_INV_B, 0x00000077); + break; + default: + break; + } +} + +static void rtw8812a_phy_set_rfe_reg_5g(struct rtw_dev *rtwdev) +{ + switch (rtwdev->efuse.rfe_option) { + case 0: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77337717); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77337717); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, RFE_INV_MASK, 0x010); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x010); + break; + case 1: + if (rtwdev->efuse.btcoex) { + rtw_write32_mask(rtwdev, REG_RFE_PINMUX_A, 0xffffff, 0x337717); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77337717); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, 0x33f00000, 0x000); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x000); + } else { + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77337717); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77337717); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, RFE_INV_MASK, 0x000); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x000); + } + break; + case 2: + case 4: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77337777); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77337777); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, RFE_INV_MASK, 0x010); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x010); + break; + case 3: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x54337717); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x54337717); + rtw_write32_mask(rtwdev, REG_RFE_INV_A, RFE_INV_MASK, 0x010); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x010); + rtw_write32_mask(rtwdev, REG_ANTSEL_SW, 0x00000303, 0x1); + break; + case 5: + rtw_write8(rtwdev, REG_RFE_PINMUX_A + 2, 0x33); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77337777); + rtw_write8_set(rtwdev, REG_RFE_INV_A + 3, BIT(0)); + rtw_write32_mask(rtwdev, REG_RFE_INV_B, RFE_INV_MASK, 0x010); + break; + case 6: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x07737717); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x07737717); + rtw_write32(rtwdev, REG_RFE_INV_A, 0x00000077); + rtw_write32(rtwdev, REG_RFE_INV_B, 0x00000077); + break; + default: + break; + } +} + +static void rtw88xxa_switch_band(struct rtw_dev *rtwdev, u8 new_band, u8 bw) +{ + const struct rtw_chip_info *chip = rtwdev->chip; + u16 basic_rates, reg_41a; + + /* 8811au one antenna module doesn't support antenna div, so driver must + * 
control the antenna band, otherwise one of the bands will have issues + */ + if (chip->id == RTW_CHIP_TYPE_8821A && !rtwdev->efuse.btcoex && + rtwdev->efuse.ant_div_cfg == 0) + rtw8821a_set_ext_band_switch(rtwdev, new_band); + + if (new_band == RTW_BAND_2G) { + rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST); + + if (chip->id == RTW_CHIP_TYPE_8821A) { + rtw8821a_phy_set_rfe_reg_24g(rtwdev); + + rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0); + } else { + rtw_write32_mask(rtwdev, REG_BWINDICATION, 0x3, 0x1); + rtw_write32_mask(rtwdev, REG_PDMFTH, GENMASK(17, 13), 0x17); + + if (bw == RTW_CHANNEL_WIDTH_20 && + rtwdev->hal.rf_type == RF_1T1R && + !rtwdev->efuse.ext_lna_2g) + rtw_write32_mask(rtwdev, REG_PDMFTH, GENMASK(3, 1), 0x02); + else + rtw_write32_mask(rtwdev, REG_PDMFTH, GENMASK(3, 1), 0x04); + + rtw_write32_mask(rtwdev, REG_CCASEL, 0x3, 0); + + rtw8812a_phy_set_rfe_reg_24g(rtwdev); + } + + rtw_write32_mask(rtwdev, REG_TXPSEL, 0xf0, 0x1); + rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0x1); + + basic_rates = BIT(DESC_RATE1M) | BIT(DESC_RATE2M) | + BIT(DESC_RATE5_5M) | BIT(DESC_RATE11M) | + BIT(DESC_RATE6M) | BIT(DESC_RATE12M) | + BIT(DESC_RATE24M); + rtw_write32_mask(rtwdev, REG_RRSR, 0xfffff, basic_rates); + + rtw_write8_clr(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN); + } else { /* RTW_BAND_5G */ + if (chip->id == RTW_CHIP_TYPE_8821A) + rtw8821a_phy_set_rfe_reg_5g(rtwdev); + + rtw_write8_set(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN); + + read_poll_timeout_atomic(rtw_read16, reg_41a, (reg_41a & 0x30) == 0x30, + 50, 2500, false, rtwdev, REG_TXPKT_EMPTY); + + rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST); + + if (chip->id == RTW_CHIP_TYPE_8821A) { + rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 1); + } else { + rtw_write32_mask(rtwdev, REG_BWINDICATION, 0x3, 0x2); + rtw_write32_mask(rtwdev, REG_PDMFTH, GENMASK(17, 13), 0x15); + rtw_write32_mask(rtwdev, REG_PDMFTH, GENMASK(3, 1), 0x04); + + rtw_write32_mask(rtwdev, REG_CCASEL, 0x3, 1); + + rtw8812a_phy_set_rfe_reg_5g(rtwdev); + } + + rtw_write32_mask(rtwdev, REG_TXPSEL, 0xf0, 0); + rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0xf); + + basic_rates = BIT(DESC_RATE6M) | BIT(DESC_RATE12M) | + BIT(DESC_RATE24M); + rtw_write32_mask(rtwdev, REG_RRSR, 0xfffff, basic_rates); + } + + rtw88xxa_set_channel_bb_swing(rtwdev, new_band); +} + +int rtw88xxa_power_on(struct rtw_dev *rtwdev) +{ + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); + const struct rtw_chip_info *chip = rtwdev->chip; + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_hal *hal = &rtwdev->hal; + int ret; + + if (test_bit(RTW_FLAG_POWERON, rtwdev->flags)) + return 0; + + /* Override rtw_chip_efuse_info_setup() */ + if (chip->id == RTW_CHIP_TYPE_8821A) + efuse->btcoex = rtw_read32_mask(rtwdev, REG_WL_BT_PWR_CTRL, + BIT_BT_FUNC_EN); + + /* Override rtw_chip_efuse_info_setup() */ + if (chip->id == RTW_CHIP_TYPE_8812A) + rtw8812a_read_amplifier_type(rtwdev); + + ret = rtw_hci_setup(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to setup hci\n"); + goto err; + } + + /* Revise for U2/U3 switch: we cannot update the RF-A/B reset. + * Reset after MAC power on to prevent RF R/W errors. + * Is this the right method? + */ + if (chip->id == RTW_CHIP_TYPE_8812A) { + rtw_write8(rtwdev, REG_RF_CTRL, 5); + rtw_write8(rtwdev, REG_RF_CTRL, 7); + rtw_write8(rtwdev, REG_RF_B_CTRL, 5); + rtw_write8(rtwdev, REG_RF_B_CTRL, 7); + } + + /* If the HW didn't go through a complete de-init procedure, + * running the init procedure again will probably cause + * problems. 
+ */ + rtw88xxau_hw_reset(rtwdev); + + ret = rtw88xxau_init_power_on(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to power on\n"); + goto err; + } + + ret = rtw_set_trx_fifo_info(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to set trx fifo info\n"); + goto err; + } + + ret = rtw88xxa_llt_init(rtwdev, rtwdev->fifo.rsvd_boundary); + if (ret) { + rtw_err(rtwdev, "failed to init llt\n"); + goto err; + } + + rtw_write32_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN); + + ret = rtw_wait_firmware_completion(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to wait firmware completion\n"); + goto err_off; + } + + ret = rtw_download_firmware(rtwdev, &rtwdev->fw); + if (ret) { + rtw_err(rtwdev, "failed to download firmware\n"); + goto err_off; + } + + rtw_write8(rtwdev, REG_HMETFR, 0xf); + + rtw_load_table(rtwdev, chip->mac_tbl); + + rtw88xxau_init_queue_reserved_page(rtwdev); + rtw88xxau_init_tx_buffer_boundary(rtwdev); + rtw88xxau_init_queue_priority(rtwdev); + + rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, + chip->rxff_size - REPORT_BUF - 1); + + if (chip->id == RTW_CHIP_TYPE_8812A) + rtw_write8(rtwdev, REG_PBP, + u8_encode_bits(PBP_512, PBP_TX_MASK) | + u8_encode_bits(PBP_64, PBP_RX_MASK)); + + rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE); + + rtw_write32(rtwdev, REG_HIMR0, 0); + rtw_write32(rtwdev, REG_HIMR1, 0); + + rtw_write32_mask(rtwdev, REG_CR, 0x30000, 0x2); + + rtw88xxa_init_wmac_setting(rtwdev); + rtw88xxa_init_adaptive_ctrl(rtwdev); + rtw88xxa_init_edca(rtwdev); + + rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL, BIT(7)); + rtw_write8(rtwdev, REG_ACKTO, 0x80); + + rtw88xxau_tx_aggregation(rtwdev); + + rtw88xxa_init_beacon_parameters(rtwdev); + rtw_write8(rtwdev, REG_BCN_MAX_ERR, 0xff); + + rtw_hci_interface_cfg(rtwdev); + + /* usb3 rx interval */ + rtw_write8(rtwdev, REG_USB3_RXITV, 0x01); + + /* burst length=4, set 0x3400 for burst length=2 */ + rtw_write16(rtwdev, REG_RXDMA_STATUS, 0x7400); + rtw_write8(rtwdev, REG_RXDMA_STATUS + 1, 0xf5); + + /* 0x456 = 0x70, suggested by Zhilin */ + if (chip->id == RTW_CHIP_TYPE_8821A) + rtw_write8(rtwdev, REG_AMPDU_MAX_TIME, 0x5e); + else + rtw_write8(rtwdev, REG_AMPDU_MAX_TIME, 0x70); + + rtw_write32(rtwdev, REG_AMPDU_MAX_LENGTH, 0xffffffff); + rtw_write8(rtwdev, REG_USTIME_TSF, 0x50); + rtw_write8(rtwdev, REG_USTIME_EDCA, 0x50); + + if (rtwusb->udev->speed == USB_SPEED_SUPER) + /* Disable U1/U2 Mode to avoid 2.5G spur in USB3.0. */ + rtw_write8_clr(rtwdev, REG_USB_MOD, BIT(4) | BIT(3)); + + rtw_write8_set(rtwdev, REG_SINGLE_AMPDU_CTRL, BIT_EN_SINGLE_APMDU); + + /* for VHT packet length 11K */ + rtw_write8(rtwdev, REG_RX_PKT_LIMIT, 0x18); + + rtw_write8(rtwdev, REG_PIFS, 0x00); + + if (chip->id == RTW_CHIP_TYPE_8821A) { + /* 0x0a0a is too small, it can't pass the AC logo test. Change to 0x1f1f. */ + rtw_write16(rtwdev, REG_MAX_AGGR_NUM, 0x1f1f); + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL, 0x80); + rtw_write32(rtwdev, REG_FAST_EDCA_CTRL, 0x03087777); + } else { + rtw_write16(rtwdev, REG_MAX_AGGR_NUM, 0x1f1f); + rtw_write8_clr(rtwdev, REG_FWHW_TXQ_CTRL, BIT(7)); + } + + /* to prevent the MAC from being reset by the bus. 
*/ + rtw_write8_set(rtwdev, REG_RSV_CTRL, BIT(5) | BIT(6)); + + /* ARFB table 9 for 11ac 5G 2SS */ + rtw_write32(rtwdev, REG_ARFR0, 0x00000010); + rtw_write32(rtwdev, REG_ARFRH0, 0xfffff000); + + /* ARFB table 10 for 11ac 5G 1SS */ + rtw_write32(rtwdev, REG_ARFR1_V1, 0x00000010); + rtw_write32(rtwdev, REG_ARFRH1_V1, 0x003ff000); + + /* ARFB table 11 for 11ac 24G 1SS */ + rtw_write32(rtwdev, REG_ARFR2_V1, 0x00000015); + rtw_write32(rtwdev, REG_ARFRH2_V1, 0x003ff000); + + /* ARFB table 12 for 11ac 24G 2SS */ + rtw_write32(rtwdev, REG_ARFR3_V1, 0x00000015); + rtw_write32(rtwdev, REG_ARFRH3_V1, 0xffcff000); + + rtw_write8_set(rtwdev, REG_CR, BIT_MACTXEN | BIT_MACRXEN); + + rtw88xxa_phy_bb_config(rtwdev); + rtw88xxa_phy_rf_config(rtwdev); + + if (chip->id == RTW_CHIP_TYPE_8812A && hal->rf_path_num == 1) + rtw8812a_config_1t(rtwdev); + + rtw88xxa_switch_band(rtwdev, RTW_BAND_2G, RTW_CHANNEL_WIDTH_20); + + rtw_write32(rtwdev, RTW_SEC_CMD_REG, BIT(31) | BIT(30)); + + rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0xff); + rtw_write32(rtwdev, REG_BAR_MODE_CTRL, 0x0201ffff); + rtw_write8(rtwdev, REG_NAV_CTRL + 2, 0); + + rtw_write8_clr(rtwdev, REG_GPIO_MUXCFG, BIT(5)); + + rtw_phy_init(rtwdev); + + rtw88xxa_pwrtrack_init(rtwdev); + + /* 0x4c6[3] 1: RTS BW = Data BW + * 0: RTS BW depends on CCA / secondary CCA result. + */ + rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT(3)); + + /* enable Tx report. */ + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, 0x0f); + + /* Pretx_en, for WEP/TKIP SEC */ + rtw_write8(rtwdev, REG_EARLY_MODE_CONTROL + 3, 0x01); + + rtw_write16(rtwdev, REG_TX_RPT_TIME, 0x3df0); + + /* Reset USB mode switch setting */ + rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x0); + rtw_write8(rtwdev, REG_ACLK_MON, 0x0); + + rtw_write8(rtwdev, REG_USB_HRPWM, 0); + + /* ack for xmit mgmt frames. */ + rtw_write32_set(rtwdev, REG_FWHW_TXQ_CTRL, BIT(12)); + + hal->cck_high_power = rtw_read32_mask(rtwdev, REG_CCK_RPT_FORMAT, + BIT_CCK_RPT_FORMAT); + + ret = rtw_hci_start(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to start hci\n"); + goto err_off; + } + + if (efuse->btcoex) { + rtw_coex_power_on_setting(rtwdev); + rtw_coex_init_hw_config(rtwdev, false); + } + + set_bit(RTW_FLAG_POWERON, rtwdev->flags); + + return 0; + +err_off: + chip->ops->power_off(rtwdev); + +err: + return ret; +} +EXPORT_SYMBOL(rtw88xxa_power_on); + +u32 rtw88xxa_phy_read_rf(struct rtw_dev *rtwdev, + enum rtw_rf_path rf_path, u32 addr, u32 mask) +{ + static const u32 pi_addr[2] = { REG_3WIRE_SWA, REG_3WIRE_SWB }; + static const u32 read_addr[2][2] = { + { REG_SI_READ_A, REG_SI_READ_B }, + { REG_PI_READ_A, REG_PI_READ_B } + }; + const struct rtw_chip_info *chip = rtwdev->chip; + const struct rtw_hal *hal = &rtwdev->hal; + bool set_cca, pi_mode; + u32 val; + + if (rf_path >= hal->rf_phy_num) { + rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path); + return INV_RF_DATA; + } + + /* CCA off to avoid reading the wrong value. + * Toggling CCA would affect RF 0x0, skip it. 
+ */ + set_cca = addr != 0x0 && chip->id == RTW_CHIP_TYPE_8812A && + hal->cut_version != RTW_CHIP_VER_CUT_C; + + if (set_cca) + rtw_write32_set(rtwdev, REG_CCA2ND, BIT(3)); + + addr &= 0xff; + + pi_mode = rtw_read32_mask(rtwdev, pi_addr[rf_path], 0x4); + + rtw_write32_mask(rtwdev, REG_HSSI_READ, MASKBYTE0, addr); + + if (chip->id == RTW_CHIP_TYPE_8821A || + hal->cut_version == RTW_CHIP_VER_CUT_C) + udelay(20); + + val = rtw_read32_mask(rtwdev, read_addr[pi_mode][rf_path], mask); + + /* CCA on */ + if (set_cca) + rtw_write32_clr(rtwdev, REG_CCA2ND, BIT(3)); + + return val; +} +EXPORT_SYMBOL(rtw88xxa_phy_read_rf); + +static void rtw8812a_phy_fix_spur(struct rtw_dev *rtwdev, u8 channel, u8 bw) +{ + /* C cut Item12 ADC FIFO CLOCK */ + if (rtwdev->hal.cut_version == RTW_CHIP_VER_CUT_C) { + if (bw == RTW_CHANNEL_WIDTH_40 && channel == 11) + rtw_write32_mask(rtwdev, REG_ADCCLK, 0xC00, 0x3); + else + rtw_write32_mask(rtwdev, REG_ADCCLK, 0xC00, 0x2); + + /* A workaround to resolve 2480Mhz spur by setting ADC clock + * as 160M. + */ + if (bw == RTW_CHANNEL_WIDTH_20 && (channel == 13 || channel == 14)) { + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x300, 0x3); + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 1); + } else if (bw == RTW_CHANNEL_WIDTH_40 && channel == 11) { + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 1); + } else if (bw != RTW_CHANNEL_WIDTH_80) { + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x300, 0x2); + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0); + } + } else { + /* A workaround to resolve 2480Mhz spur by setting ADC clock + * as 160M. + */ + if (bw == RTW_CHANNEL_WIDTH_20 && (channel == 13 || channel == 14)) + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x300, 0x3); + else if (channel <= 14) /* 2.4G only */ + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x300, 0x2); + } +} + +static void rtw88xxa_switch_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw) +{ + struct rtw_hal *hal = &rtwdev->hal; + u32 fc_area, rf_mod_ag; + u8 path; + + switch (channel) { + case 36 ... 48: + fc_area = 0x494; + break; + case 50 ... 64: + fc_area = 0x453; + break; + case 100 ... 116: + fc_area = 0x452; + break; + default: + if (channel >= 118) + fc_area = 0x412; + else + fc_area = 0x96a; + break; + } + + rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, fc_area); + + for (path = 0; path < hal->rf_path_num; path++) { + switch (channel) { + case 36 ... 64: + rf_mod_ag = 0x101; + break; + case 100 ... 
140: + rf_mod_ag = 0x301; + break; + default: + if (channel > 140) + rf_mod_ag = 0x501; + else + rf_mod_ag = 0x000; + break; + } + + rtw_write_rf(rtwdev, path, RF_CFGCH, + RF18_RFSI_MASK | RF18_BAND_MASK, rf_mod_ag); + + if (rtwdev->chip->id == RTW_CHIP_TYPE_8812A) + rtw8812a_phy_fix_spur(rtwdev, channel, bw); + + rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_CHANNEL_MASK, channel); + } +} + +static void rtw88xxa_set_reg_bw(struct rtw_dev *rtwdev, u8 bw) +{ + u16 val16 = rtw_read16(rtwdev, REG_WMAC_TRXPTCL_CTL); + + val16 &= ~BIT_RFMOD; + if (bw == RTW_CHANNEL_WIDTH_80) + val16 |= BIT_RFMOD_80M; + else if (bw == RTW_CHANNEL_WIDTH_40) + val16 |= BIT_RFMOD_40M; + + rtw_write16(rtwdev, REG_WMAC_TRXPTCL_CTL, val16); +} + +static void rtw88xxa_post_set_bw_mode(struct rtw_dev *rtwdev, u8 channel, + u8 bw, u8 primary_chan_idx) +{ + struct rtw_hal *hal = &rtwdev->hal; + u8 txsc40 = 0, txsc20, txsc; + u8 reg_837, l1pkval; + + rtw88xxa_set_reg_bw(rtwdev, bw); + + txsc20 = primary_chan_idx; + if (bw == RTW_CHANNEL_WIDTH_80) { + if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST) + txsc40 = RTW_SC_40_UPPER; + else + txsc40 = RTW_SC_40_LOWER; + } + + txsc = BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40); + rtw_write8(rtwdev, REG_DATA_SC, txsc); + + reg_837 = rtw_read8(rtwdev, REG_BWINDICATION + 3); + + switch (bw) { + default: + case RTW_CHANNEL_WIDTH_20: + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x003003C3, 0x00300200); + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0); + + if (hal->rf_type == RF_2T2R) + rtw_write32_mask(rtwdev, REG_L1PKTH, 0x03C00000, 7); + else + rtw_write32_mask(rtwdev, REG_L1PKTH, 0x03C00000, 8); + + break; + case RTW_CHANNEL_WIDTH_40: + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x003003C3, 0x00300201); + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0); + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x3C, txsc); + rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf0000000, txsc); + + if (reg_837 & BIT(2)) { + l1pkval = 6; + } else { + if (hal->rf_type == RF_2T2R) + l1pkval = 7; + else + l1pkval = 8; + } + + rtw_write32_mask(rtwdev, REG_L1PKTH, 0x03C00000, l1pkval); + + if (txsc == RTW_SC_20_UPPER) + rtw_write32_set(rtwdev, REG_RXSB, BIT(4)); + else + rtw_write32_clr(rtwdev, REG_RXSB, BIT(4)); + + break; + case RTW_CHANNEL_WIDTH_80: + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x003003C3, 0x00300202); + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 1); + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x3C, txsc); + rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf0000000, txsc); + + if (reg_837 & BIT(2)) { + l1pkval = 5; + } else { + if (hal->rf_type == RF_2T2R) + l1pkval = 6; + else + l1pkval = 7; + } + + rtw_write32_mask(rtwdev, REG_L1PKTH, 0x03C00000, l1pkval); + + break; + } +} + +static void rtw88xxa_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw) +{ + u8 path; + + for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) { + switch (bw) { + case RTW_CHANNEL_WIDTH_5: + case RTW_CHANNEL_WIDTH_10: + case RTW_CHANNEL_WIDTH_20: + default: + rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 3); + break; + case RTW_CHANNEL_WIDTH_40: + rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 1); + break; + case RTW_CHANNEL_WIDTH_80: + rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 0); + break; + } + } +} + +void rtw88xxa_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw, + u8 primary_chan_idx) +{ + u8 old_band, new_band; + + if (rtw_read8(rtwdev, REG_CCK_CHECK) & BIT_CHECK_CCK_EN) + old_band = RTW_BAND_5G; + else + old_band = RTW_BAND_2G; + + if (channel > 14) + new_band = RTW_BAND_5G; + else + new_band = 
RTW_BAND_2G; + + if (new_band != old_band) + rtw88xxa_switch_band(rtwdev, new_band, bw); + + rtw88xxa_switch_channel(rtwdev, channel, bw); + + rtw88xxa_post_set_bw_mode(rtwdev, channel, bw, primary_chan_idx); + + if (rtwdev->chip->id == RTW_CHIP_TYPE_8812A) + rtw8812a_phy_fix_spur(rtwdev, channel, bw); + + rtw88xxa_set_channel_rf(rtwdev, channel, bw); +} +EXPORT_SYMBOL(rtw88xxa_set_channel); + +void rtw88xxa_query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, + struct rtw_rx_pkt_stat *pkt_stat, + s8 (*cck_rx_pwr)(u8 lna_idx, u8 vga_idx)) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_jaguar_phy_status_rpt *rpt; + u8 gain[RTW_RF_PATH_MAX], rssi, i; + s8 rx_pwr_db, power_a, power_b; + const s8 min_rx_power = -120; + u8 lna_idx, vga_idx; + + rpt = (struct rtw_jaguar_phy_status_rpt *)phy_status; + + if (pkt_stat->rate <= DESC_RATE11M) { + lna_idx = le32_get_bits(rpt->w1, RTW_JGRPHY_W1_AGC_RPT_LNA_IDX); + vga_idx = le32_get_bits(rpt->w1, RTW_JGRPHY_W1_AGC_RPT_VGA_IDX); + + rx_pwr_db = cck_rx_pwr(lna_idx, vga_idx); + + pkt_stat->rx_power[RF_PATH_A] = rx_pwr_db; + pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); + dm_info->rssi[RF_PATH_A] = pkt_stat->rssi; + pkt_stat->bw = RTW_CHANNEL_WIDTH_20; + pkt_stat->signal_power = rx_pwr_db; + } else { /* OFDM rate */ + gain[RF_PATH_A] = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_GAIN_A); + gain[RF_PATH_B] = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_GAIN_B); + + for (i = RF_PATH_A; i < rtwdev->hal.rf_path_num; i++) { + pkt_stat->rx_power[i] = gain[i] - 110; + rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[i], 1); + dm_info->rssi[i] = rssi; + } + + pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, + rtwdev->hal.rf_path_num); + + power_a = pkt_stat->rx_power[RF_PATH_A]; + power_b = pkt_stat->rx_power[RF_PATH_B]; + if (rtwdev->hal.rf_path_num == 1) + power_b = power_a; + + pkt_stat->signal_power = max3(power_a, power_b, min_rx_power); + } +} +EXPORT_SYMBOL(rtw88xxa_query_phy_status); + +static void +rtw88xxa_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, + u8 rs, u32 *phy_pwr_idx) +{ + static const u32 offset_txagc[2] = { + REG_TX_AGC_A_CCK_11_CCK_1, REG_TX_AGC_B_CCK_11_CCK_1 + }; + u8 rate, rate_idx, pwr_index, shift; + struct rtw_hal *hal = &rtwdev->hal; + bool write_1ss_mcs9; + u32 mask; + int j; + + for (j = 0; j < rtw_rate_size[rs]; j++) { + rate = rtw_rate_section[rs][j]; + + pwr_index = hal->tx_pwr_tbl[path][rate]; + + shift = rate & 0x3; + *phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); + + write_1ss_mcs9 = rate == DESC_RATEVHT1SS_MCS9 && + hal->rf_path_num == 1; + + if (write_1ss_mcs9) + mask = MASKLWORD; + else + mask = MASKDWORD; + + if (shift == 0x3 || write_1ss_mcs9) { + rate_idx = rate & 0xfc; + if (rate >= DESC_RATEVHT1SS_MCS0) + rate_idx -= 0x10; + + rtw_write32_mask(rtwdev, offset_txagc[path] + rate_idx, + mask, *phy_pwr_idx); + + *phy_pwr_idx = 0; + } + } +} + +static void rtw88xxa_tx_power_training(struct rtw_dev *rtwdev, u8 bw, + u8 channel, u8 path) +{ + static const u32 write_offset[] = { + REG_TX_PWR_TRAINING_A, REG_TX_PWR_TRAINING_B, + }; + u32 power_level, write_data; + u8 i; + + power_level = rtwdev->hal.tx_pwr_tbl[path][DESC_RATEMCS7]; + write_data = 0; + + for (i = 0; i < 3; i++) { + if (i == 0) + power_level -= 10; + else if (i == 1) + power_level -= 8; + else + power_level -= 6; + + write_data |= max_t(u32, power_level, 2) << (i * 8); + } + + rtw_write32_mask(rtwdev, write_offset[path], 0xffffff, write_data); +} + +void rtw88xxa_set_tx_power_index(struct rtw_dev *rtwdev) 
+{ + struct rtw_hal *hal = &rtwdev->hal; + u32 phy_pwr_idx = 0; + int rs, path; + + for (path = 0; path < hal->rf_path_num; path++) { + for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) { + if (hal->rf_path_num == 1 && + (rs == RTW_RATE_SECTION_HT_2S || + rs == RTW_RATE_SECTION_VHT_2S)) + continue; + + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags) && + rs > RTW_RATE_SECTION_OFDM) + continue; + + if (hal->current_band_type == RTW_BAND_5G && + rs == RTW_RATE_SECTION_CCK) + continue; + + rtw88xxa_set_tx_power_index_by_rate(rtwdev, path, rs, + &phy_pwr_idx); + } + + rtw88xxa_tx_power_training(rtwdev, hal->current_band_width, + hal->current_channel, path); + } +} +EXPORT_SYMBOL(rtw88xxa_set_tx_power_index); + +void rtw88xxa_false_alarm_statistics(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 cck_fa_cnt, ofdm_fa_cnt; + u32 crc32_cnt, cca32_cnt; + u32 cck_enable; + + cck_enable = rtw_read32(rtwdev, REG_RXPSEL) & BIT(28); + cck_fa_cnt = rtw_read16(rtwdev, REG_FA_CCK); + ofdm_fa_cnt = rtw_read16(rtwdev, REG_FA_OFDM); + + dm_info->cck_fa_cnt = cck_fa_cnt; + dm_info->ofdm_fa_cnt = ofdm_fa_cnt; + dm_info->total_fa_cnt = ofdm_fa_cnt; + if (cck_enable) + dm_info->total_fa_cnt += cck_fa_cnt; + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK); + dm_info->cck_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD); + dm_info->cck_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD); + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_OFDM); + dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD); + dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD); + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_HT); + dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD); + dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD); + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_VHT); + dm_info->vht_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD); + dm_info->vht_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD); + + cca32_cnt = rtw_read32(rtwdev, REG_CCA_OFDM); + dm_info->ofdm_cca_cnt = u32_get_bits(cca32_cnt, MASKHWORD); + dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt; + if (cck_enable) { + cca32_cnt = rtw_read32(rtwdev, REG_CCA_CCK); + dm_info->cck_cca_cnt = u32_get_bits(cca32_cnt, MASKLWORD); + dm_info->total_cca_cnt += dm_info->cck_cca_cnt; + } + + rtw_write32_set(rtwdev, REG_FAS, BIT(17)); + rtw_write32_clr(rtwdev, REG_FAS, BIT(17)); + rtw_write32_clr(rtwdev, REG_CCK0_FAREPORT, BIT(15)); + rtw_write32_set(rtwdev, REG_CCK0_FAREPORT, BIT(15)); + rtw_write32_set(rtwdev, REG_CNTRST, BIT(0)); + rtw_write32_clr(rtwdev, REG_CNTRST, BIT(0)); +} +EXPORT_SYMBOL(rtw88xxa_false_alarm_statistics); + +void rtw88xxa_iqk_backup_mac_bb(struct rtw_dev *rtwdev, + u32 *macbb_backup, + const u32 *backup_macbb_reg, + u32 macbb_num) +{ + u32 i; + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + /* save MACBB default value */ + for (i = 0; i < macbb_num; i++) + macbb_backup[i] = rtw_read32(rtwdev, backup_macbb_reg[i]); +} +EXPORT_SYMBOL(rtw88xxa_iqk_backup_mac_bb); + +void rtw88xxa_iqk_backup_afe(struct rtw_dev *rtwdev, u32 *afe_backup, + const u32 *backup_afe_reg, u32 afe_num) +{ + u32 i; + + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + /* Save AFE Parameters */ + for (i = 0; i < afe_num; i++) + afe_backup[i] = rtw_read32(rtwdev, backup_afe_reg[i]); +} +EXPORT_SYMBOL(rtw88xxa_iqk_backup_afe); + +void rtw88xxa_iqk_restore_mac_bb(struct rtw_dev *rtwdev, + u32 *macbb_backup, + const u32 *backup_macbb_reg, + u32 macbb_num) +{ + u32 i; + + /* [31] = 0 --> Page C */ + 
rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + /* Reload MacBB Parameters */ + for (i = 0; i < macbb_num; i++) + rtw_write32(rtwdev, backup_macbb_reg[i], macbb_backup[i]); +} +EXPORT_SYMBOL(rtw88xxa_iqk_restore_mac_bb); + +void rtw88xxa_iqk_configure_mac(struct rtw_dev *rtwdev) +{ + /* [31] = 0 --> Page C */ + rtw_write32_mask(rtwdev, REG_CCASEL, BIT(31), 0x0); + + rtw_write8(rtwdev, REG_TXPAUSE, 0x3f); + rtw_write32_mask(rtwdev, REG_BCN_CTRL, + (BIT_EN_BCN_FUNCTION << 8) | BIT_EN_BCN_FUNCTION, 0x0); + + /* RX ante off */ + rtw_write8(rtwdev, REG_RXPSEL, 0x00); + + /* CCA off */ + rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf, 0xc); + + /* CCK RX path off */ + rtw_write8(rtwdev, REG_CCK_RX + 3, 0xf); +} +EXPORT_SYMBOL(rtw88xxa_iqk_configure_mac); + +bool rtw88xxa_iqk_finish(int average, int threshold, + int *x_temp, int *y_temp, int *x, int *y, + bool break_inner, bool break_outer) +{ + bool finish = false; + int i, ii, dx, dy; + + for (i = 0; i < average; i++) { + for (ii = i + 1; ii < average; ii++) { + dx = abs_diff(x_temp[i] >> 21, x_temp[ii] >> 21); + dy = abs_diff(y_temp[i] >> 21, y_temp[ii] >> 21); + + if (dx < threshold && dy < threshold) { + *x = ((x_temp[i] >> 21) + (x_temp[ii] >> 21)); + *y = ((y_temp[i] >> 21) + (y_temp[ii] >> 21)); + + *x /= 2; + *y /= 2; + + finish = true; + + if (break_inner) + break; + } + } + + if (finish && break_outer) + break; + } + + return finish; +} +EXPORT_SYMBOL(rtw88xxa_iqk_finish); + +static void rtw88xxa_pwrtrack_set(struct rtw_dev *rtwdev, u8 tx_rate, u8 path) +{ + static const u32 reg_txscale[2] = { REG_TXSCALE_A, REG_TXSCALE_B }; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 cck_swing_idx, ofdm_swing_idx; + u8 pwr_tracking_limit; + + switch (tx_rate) { + case DESC_RATE1M ... DESC_RATE11M: + pwr_tracking_limit = 32; + break; + case DESC_RATE6M ... DESC_RATE48M: + case DESC_RATEMCS3 ... DESC_RATEMCS4: + case DESC_RATEMCS11 ... DESC_RATEMCS12: + case DESC_RATEVHT1SS_MCS3 ... DESC_RATEVHT1SS_MCS4: + case DESC_RATEVHT2SS_MCS3 ... DESC_RATEVHT2SS_MCS4: + pwr_tracking_limit = 30; + break; + case DESC_RATE54M: + case DESC_RATEMCS5 ... DESC_RATEMCS7: + case DESC_RATEMCS13 ... DESC_RATEMCS15: + case DESC_RATEVHT1SS_MCS5 ... DESC_RATEVHT1SS_MCS6: + case DESC_RATEVHT2SS_MCS5 ... DESC_RATEVHT2SS_MCS6: + pwr_tracking_limit = 28; + break; + case DESC_RATEMCS0 ... DESC_RATEMCS2: + case DESC_RATEMCS8 ... DESC_RATEMCS10: + case DESC_RATEVHT1SS_MCS0 ... DESC_RATEVHT1SS_MCS2: + case DESC_RATEVHT2SS_MCS0 ... 
DESC_RATEVHT2SS_MCS2: + pwr_tracking_limit = 34; + break; + case DESC_RATEVHT1SS_MCS7: + case DESC_RATEVHT2SS_MCS7: + pwr_tracking_limit = 26; + break; + default: + case DESC_RATEVHT1SS_MCS8: + case DESC_RATEVHT2SS_MCS8: + pwr_tracking_limit = 24; + break; + case DESC_RATEVHT1SS_MCS9: + case DESC_RATEVHT2SS_MCS9: + pwr_tracking_limit = 22; + break; + } + + cck_swing_idx = dm_info->delta_power_index[path] + dm_info->default_cck_index; + ofdm_swing_idx = dm_info->delta_power_index[path] + dm_info->default_ofdm_index; + + if (ofdm_swing_idx > pwr_tracking_limit) { + if (path == RF_PATH_A) + dm_info->txagc_remnant_cck = cck_swing_idx - pwr_tracking_limit; + dm_info->txagc_remnant_ofdm[path] = ofdm_swing_idx - pwr_tracking_limit; + + ofdm_swing_idx = pwr_tracking_limit; + } else if (ofdm_swing_idx == 0) { + if (path == RF_PATH_A) + dm_info->txagc_remnant_cck = cck_swing_idx; + dm_info->txagc_remnant_ofdm[path] = ofdm_swing_idx; + } else { + if (path == RF_PATH_A) + dm_info->txagc_remnant_cck = 0; + dm_info->txagc_remnant_ofdm[path] = 0; + } + + rtw_write32_mask(rtwdev, reg_txscale[path], GENMASK(31, 21), + rtw88xxa_txscale_tbl[ofdm_swing_idx]); +} + +void rtw88xxa_phy_pwrtrack(struct rtw_dev *rtwdev, + void (*do_lck)(struct rtw_dev *rtwdev), + void (*do_iqk)(struct rtw_dev *rtwdev)) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_hal *hal = &rtwdev->hal; + struct rtw_swing_table swing_table; + s8 remnant_pre[RTW_RF_PATH_MAX]; + u8 thermal_value, delta, path; + bool need_iqk; + + rtw_phy_config_swing_table(rtwdev, &swing_table); + + if (rtwdev->efuse.thermal_meter[0] == 0xff) { + pr_err_once("efuse thermal meter is 0xff\n"); + return; + } + + thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00); + + rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A); + + need_iqk = rtw_phy_pwrtrack_need_iqk(rtwdev); + + if (need_iqk && do_lck) + do_lck(rtwdev); + + if (dm_info->pwr_trk_init_trigger) + dm_info->pwr_trk_init_trigger = false; + else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value, + RF_PATH_A)) + goto iqk; + + delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A); + + for (path = RF_PATH_A; path < hal->rf_path_num; path++) { + remnant_pre[path] = dm_info->txagc_remnant_ofdm[path]; + + dm_info->delta_power_index[path] = + rtw_phy_pwrtrack_get_pwridx(rtwdev, &swing_table, path, + RF_PATH_A, delta); + + if (dm_info->delta_power_index[path] != + dm_info->delta_power_index_last[path]) { + dm_info->delta_power_index_last[path] = + dm_info->delta_power_index[path]; + + rtw88xxa_pwrtrack_set(rtwdev, dm_info->tx_rate, path); + } + } + + for (path = RF_PATH_A; path < hal->rf_path_num; path++) { + if (remnant_pre[path] != dm_info->txagc_remnant_ofdm[path]) { + rtw_phy_set_tx_power_level(rtwdev, + hal->current_channel); + break; + } + } + +iqk: + if (need_iqk) + do_iqk(rtwdev); +} +EXPORT_SYMBOL(rtw88xxa_phy_pwrtrack); + +void rtw88xxa_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) +{ + static const u8 pd[CCK_PD_LV_MAX] = {0x40, 0x83, 0xcd, 0xdd, 0xed}; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + /* Override rtw_phy_cck_pd_lv_link(). It implements something + * like type 2/3/4. We need type 1 here. 
+ */ + if (rtw_is_assoc(rtwdev)) { + if (dm_info->min_rssi > 60) { + new_lvl = CCK_PD_LV3; + } else if (dm_info->min_rssi > 35) { + new_lvl = CCK_PD_LV2; + } else if (dm_info->min_rssi > 20) { + if (dm_info->cck_fa_avg > 500) + new_lvl = CCK_PD_LV2; + else if (dm_info->cck_fa_avg < 250) + new_lvl = CCK_PD_LV1; + else + return; + } else { + new_lvl = CCK_PD_LV1; + } + } + + rtw_dbg(rtwdev, RTW_DBG_PHY, "lv: (%d) -> (%d)\n", + dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A], new_lvl); + + if (dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] == new_lvl) + return; + + dm_info->cck_fa_avg = CCK_FA_AVG_RESET; + dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] = new_lvl; + + rtw_write8(rtwdev, REG_CCK_PD_TH, pd[new_lvl]); +} +EXPORT_SYMBOL(rtw88xxa_phy_cck_pd_set); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8821a/8811a/8812a common code"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw88xxa.h b/drivers/net/wireless/realtek/rtw88/rtw88xxa.h new file mode 100644 index 000000000000..09a45c1a4129 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw88xxa.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW88XXA_H__ +#define __RTW88XXA_H__ + +#include <asm/byteorder.h> +#include "reg.h" + +struct rtw8821au_efuse { + u8 res4[48]; /* 0xd0 */ + u8 vid[2]; /* 0x100 */ + u8 pid[2]; + u8 res8[3]; + u8 mac_addr[ETH_ALEN]; /* 0x107 */ + u8 res9[243]; +} __packed; + +struct rtw8812au_efuse { + u8 vid[2]; /* 0xd0 */ + u8 pid[2]; /* 0xd2 */ + u8 res0[3]; + u8 mac_addr[ETH_ALEN]; /* 0xd7 */ + u8 res1[291]; +} __packed; + +struct rtw88xxa_efuse { + __le16 rtl_id; + u8 res0[6]; /* 0x02 */ + u8 usb_mode; /* 0x08 */ + u8 res1[7]; /* 0x09 */ + + /* power index for four RF paths */ + struct rtw_txpwr_idx txpwr_idx_table[4]; + + u8 channel_plan; /* 0xb8 */ + u8 xtal_k; + u8 thermal_meter; + u8 iqk_lck; + u8 pa_type; /* 0xbc */ + u8 lna_type_2g; /* 0xbd */ + u8 res2; + u8 lna_type_5g; /* 0xbf */ + u8 res3; + u8 rf_board_option; /* 0xc1 */ + u8 rf_feature_option; + u8 rf_bt_setting; + u8 eeprom_version; + u8 eeprom_customer_id; /* 0xc5 */ + u8 tx_bb_swing_setting_2g; + u8 tx_bb_swing_setting_5g; + u8 tx_pwr_calibrate_rate; + u8 rf_antenna_option; /* 0xc9 */ + u8 rfe_option; + u8 country_code[2]; + u8 res4[3]; + union { + struct rtw8821au_efuse rtw8821au; + struct rtw8812au_efuse rtw8812au; + }; +} __packed; + +static_assert(sizeof(struct rtw88xxa_efuse) == 512); + +#define WLAN_BCN_DMA_TIME 0x02 +#define WLAN_TBTT_PROHIBIT 0x04 +#define WLAN_TBTT_HOLD_TIME 0x064 +#define WLAN_TBTT_TIME (WLAN_TBTT_PROHIBIT |\ + (WLAN_TBTT_HOLD_TIME << BIT_SHIFT_TBTT_HOLD_TIME_AP)) + +struct rtw_jaguar_phy_status_rpt { + __le32 w0; + __le32 w1; + __le32 w2; + __le32 w3; + __le32 w4; + __le32 w5; + __le32 w6; +} __packed; + +#define RTW_JGRPHY_W0_GAIN_A GENMASK(6, 0) +#define RTW_JGRPHY_W0_TRSW_A BIT(7) +#define RTW_JGRPHY_W0_GAIN_B GENMASK(14, 8) +#define RTW_JGRPHY_W0_TRSW_B BIT(15) +#define RTW_JGRPHY_W0_CHL_NUM GENMASK(25, 16) +#define RTW_JGRPHY_W0_SUB_CHNL GENMASK(29, 26) +#define RTW_JGRPHY_W0_R_RFMOD GENMASK(31, 30) + +/* CCK: */ +#define RTW_JGRPHY_W1_SIG_QUAL GENMASK(7, 0) +#define RTW_JGRPHY_W1_AGC_RPT_VGA_IDX GENMASK(12, 8) +#define RTW_JGRPHY_W1_AGC_RPT_LNA_IDX GENMASK(15, 13) +#define RTW_JGRPHY_W1_BB_POWER GENMASK(23, 16) +/* OFDM: */ +#define RTW_JGRPHY_W1_PWDB_ALL GENMASK(7, 0) +#define RTW_JGRPHY_W1_CFO_SHORT_A GENMASK(15, 8) /* s8 */ 
+#define RTW_JGRPHY_W1_CFO_SHORT_B GENMASK(23, 16) /* s8 */ +#define RTW_JGRPHY_W1_BT_RF_CH_MSB GENMASK(31, 30) + +#define RTW_JGRPHY_W2_ANT_DIV_SW_A BIT(0) +#define RTW_JGRPHY_W2_ANT_DIV_SW_B BIT(1) +#define RTW_JGRPHY_W2_BT_RF_CH_LSB GENMASK(7, 2) +#define RTW_JGRPHY_W2_CFO_TAIL_A GENMASK(15, 8) /* s8 */ +#define RTW_JGRPHY_W2_CFO_TAIL_B GENMASK(23, 16) /* s8 */ +#define RTW_JGRPHY_W2_PCTS_MSK_RPT_0 GENMASK(31, 24) + +#define RTW_JGRPHY_W3_PCTS_MSK_RPT_1 GENMASK(7, 0) +/* Stream 1 and 2 RX EVM: */ +#define RTW_JGRPHY_W3_RXEVM_1 GENMASK(15, 8) /* s8 */ +#define RTW_JGRPHY_W3_RXEVM_2 GENMASK(23, 16) /* s8 */ +#define RTW_JGRPHY_W3_RXSNR_A GENMASK(31, 24) /* s8 */ + +#define RTW_JGRPHY_W4_RXSNR_B GENMASK(7, 0) /* s8 */ +#define RTW_JGRPHY_W4_PCTS_MSK_RPT_2 GENMASK(21, 8) +#define RTW_JGRPHY_W4_PCTS_RPT_VALID BIT(22) +#define RTW_JGRPHY_W4_RXEVM_3 GENMASK(31, 24) /* s8 */ + +#define RTW_JGRPHY_W5_RXEVM_4 GENMASK(7, 0) /* s8 */ +/* 8812a, stream 1 and 2 CSI: */ +#define RTW_JGRPHY_W5_CSI_CURRENT_1 GENMASK(15, 8) +#define RTW_JGRPHY_W5_CSI_CURRENT_2 GENMASK(23, 16) +/* 8814a: */ +#define RTW_JGRPHY_W5_RXSNR_C GENMASK(15, 8) /* s8 */ +#define RTW_JGRPHY_W5_RXSNR_D GENMASK(23, 16) /* s8 */ +#define RTW_JGRPHY_W5_GAIN_C GENMASK(30, 24) +#define RTW_JGRPHY_W5_TRSW_C BIT(31) + +#define RTW_JGRPHY_W6_GAIN_D GENMASK(6, 0) +#define RTW_JGRPHY_W6_TRSW_D BIT(7) +#define RTW_JGRPHY_W6_SIGEVM GENMASK(15, 8) /* s8 */ +#define RTW_JGRPHY_W6_ANTIDX_ANTC GENMASK(18, 16) +#define RTW_JGRPHY_W6_ANTIDX_ANTD GENMASK(21, 19) +#define RTW_JGRPHY_W6_DPDT_CTRL_KEEP BIT(22) +#define RTW_JGRPHY_W6_GNT_BT_KEEP BIT(23) +#define RTW_JGRPHY_W6_ANTIDX_ANTA GENMASK(26, 24) +#define RTW_JGRPHY_W6_ANTIDX_ANTB GENMASK(29, 27) +#define RTW_JGRPHY_W6_HW_ANTSW_OCCUR GENMASK(31, 30) + +#define RF18_BW_MASK (BIT(11) | BIT(10)) + +void rtw88xxa_efuse_grant(struct rtw_dev *rtwdev, bool on); +int rtw88xxa_read_efuse(struct rtw_dev *rtwdev, u8 *log_map); +void rtw88xxa_power_off(struct rtw_dev *rtwdev, + const struct rtw_pwr_seq_cmd *const *enter_lps_flow); +int rtw88xxa_power_on(struct rtw_dev *rtwdev); +u32 rtw88xxa_phy_read_rf(struct rtw_dev *rtwdev, + enum rtw_rf_path rf_path, u32 addr, u32 mask); +void rtw88xxa_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw, + u8 primary_chan_idx); +void rtw88xxa_query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, + struct rtw_rx_pkt_stat *pkt_stat, + s8 (*cck_rx_pwr)(u8 lna_idx, u8 vga_idx)); +void rtw88xxa_set_tx_power_index(struct rtw_dev *rtwdev); +void rtw88xxa_false_alarm_statistics(struct rtw_dev *rtwdev); +void rtw88xxa_iqk_backup_mac_bb(struct rtw_dev *rtwdev, + u32 *macbb_backup, + const u32 *backup_macbb_reg, + u32 macbb_num); +void rtw88xxa_iqk_backup_afe(struct rtw_dev *rtwdev, u32 *afe_backup, + const u32 *backup_afe_reg, u32 afe_num); +void rtw88xxa_iqk_restore_mac_bb(struct rtw_dev *rtwdev, + u32 *macbb_backup, + const u32 *backup_macbb_reg, + u32 macbb_num); +void rtw88xxa_iqk_configure_mac(struct rtw_dev *rtwdev); +bool rtw88xxa_iqk_finish(int average, int threshold, + int *x_temp, int *y_temp, int *x, int *y, + bool break_inner, bool break_outer); +void rtw88xxa_phy_pwrtrack(struct rtw_dev *rtwdev, + void (*do_lck)(struct rtw_dev *rtwdev), + void (*do_iqk)(struct rtw_dev *rtwdev)); +void rtw88xxa_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl); + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c index 1de93fc9efe9..90fc8a5fa89e 100644 --- a/drivers/net/wireless/realtek/rtw88/rx.c +++ 
b/drivers/net/wireless/realtek/rtw88/rx.c @@ -234,10 +234,14 @@ static void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev, else rx_status->bw = RATE_INFO_BW_20; - rx_status->signal = pkt_stat->signal_power; - for (path = 0; path < rtwdev->hal.rf_path_num; path++) { - rx_status->chains |= BIT(path); - rx_status->chain_signal[path] = pkt_stat->rx_power[path]; + if (pkt_stat->phy_status) { + rx_status->signal = pkt_stat->signal_power; + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { + rx_status->chains |= BIT(path); + rx_status->chain_signal[path] = pkt_stat->rx_power[path]; + } + } else { + rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; } rtw_rx_addr_match(rtwdev, pkt_stat, hdr); diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c index f0b06ed8f76d..799230eb5f16 100644 --- a/drivers/net/wireless/realtek/rtw88/sdio.c +++ b/drivers/net/wireless/realtek/rtw88/sdio.c @@ -864,7 +864,7 @@ static void rtw_sdio_tx_skb_prepare(struct rtw_dev *rtwdev, pkt_info->qsel = rtw_sdio_get_tx_qsel(rtwdev, skb, queue); - rtw_tx_fill_tx_desc(pkt_info, skb); + rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb); rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, pkt_desc); } diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index dae7ca148865..6ed470dd6f22 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -32,7 +32,8 @@ void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif, } } -void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) +void rtw_tx_fill_tx_desc(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) { struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)skb->data; bool more_data = false; @@ -67,6 +68,9 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) tx_desc->w4 = le32_encode_bits(pkt_info->rate, RTW_TX_DESC_W4_DATARATE); + if (rtwdev->chip->old_datarate_fb_limit) + tx_desc->w4 |= le32_encode_bits(0x1f, RTW_TX_DESC_W4_DATARATE_FB_LIMIT); + tx_desc->w5 = le32_encode_bits(pkt_info->short_gi, RTW_TX_DESC_W5_DATA_SHORT) | le32_encode_bits(pkt_info->bw, RTW_TX_DESC_W5_DATA_BW) | le32_encode_bits(pkt_info->ldpc, RTW_TX_DESC_W5_DATA_LDPC) | diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h index 3d544fd7f60f..d34cdeca16f1 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.h +++ b/drivers/net/wireless/realtek/rtw88/tx.h @@ -44,6 +44,7 @@ struct rtw_tx_desc { #define RTW_TX_DESC_W3_NAVUSEHDR BIT(15) #define RTW_TX_DESC_W3_MAX_AGG_NUM GENMASK(21, 17) #define RTW_TX_DESC_W4_DATARATE GENMASK(6, 0) +#define RTW_TX_DESC_W4_DATARATE_FB_LIMIT GENMASK(12, 8) #define RTW_TX_DESC_W4_RTSRATE GENMASK(28, 24) #define RTW_TX_DESC_W5_DATA_SHORT BIT(4) #define RTW_TX_DESC_W5_DATA_BW GENMASK(6, 5) @@ -94,7 +95,8 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct ieee80211_sta *sta, struct sk_buff *skb); -void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb); +void rtw_tx_fill_tx_desc(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb); void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn); void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src); void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index 
ce43e44c5852..8d6db68246f1 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -458,7 +458,7 @@ static int rtw_usb_write_data(struct rtw_dev *rtwdev, skb_put_data(skb, buf, size); skb_push(skb, chip->tx_pkt_desc_sz); memset(skb->data, 0, chip->tx_pkt_desc_sz); - rtw_tx_fill_tx_desc(pkt_info, skb); + rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb); rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data); ret = rtw_usb_write_port(rtwdev, qsel, skb, @@ -478,6 +478,7 @@ static int rtw_usb_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, pkt_info.tx_pkt_size = size; pkt_info.qsel = TX_DESC_QSEL_BEACON; pkt_info.offset = chip->tx_pkt_desc_sz; + pkt_info.ls = true; return rtw_usb_write_data(rtwdev, &pkt_info, buf); } @@ -525,7 +526,7 @@ static int rtw_usb_tx_write(struct rtw_dev *rtwdev, pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); memset(pkt_desc, 0, chip->tx_pkt_desc_sz); ep = qsel_to_ep(rtwusb, pkt_info->qsel); - rtw_tx_fill_tx_desc(pkt_info, skb); + rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb); rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data); tx_data = rtw_usb_get_tx_data(skb); tx_data->sn = pkt_info->sn; diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 8d140b94cb44..8ef59994c0db 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -961,16 +961,24 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link, struct rtw89_h2c_dctlinfo_ud_v2 *h2c) { + struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link); + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif_link->rtwvif); + struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif_link, rtwsta_link); + bool is_mld = sta ? sta->mlo : ieee80211_vif_is_mld(vif); struct rtw89_wow_param *rtw_wow = &rtwdev->wow; u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv; + u8 *mld_sma, *mld_tma, *mld_bssid; h2c->c0 = le32_encode_bits(rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id, DCTLINFO_V2_C0_MACID) | le32_encode_bits(1, DCTLINFO_V2_C0_OP); + h2c->w2 = le32_encode_bits(is_mld, DCTLINFO_V2_W2_IS_MLD); + h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_IS_MLD); + h2c->w4 = le32_encode_bits(addr_cam->sec_ent_keyid[0], DCTLINFO_V2_W4_SEC_ENT0_KEYID) | le32_encode_bits(addr_cam->sec_ent_keyid[1], @@ -1036,4 +1044,47 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev, DCTLINFO_V2_W4_SEC_KEY_ID); h2c->m4 |= cpu_to_le32(DCTLINFO_V2_W4_SEC_KEY_ID); } + + if (!is_mld) + return; + + if (rtwvif_link->net_type == RTW89_NET_TYPE_INFRA) { + mld_sma = rtwvif->mac_addr; + mld_tma = vif->cfg.ap_addr; + mld_bssid = vif->cfg.ap_addr; + } else if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE && sta) { + mld_sma = rtwvif->mac_addr; + mld_tma = sta->addr; + mld_bssid = rtwvif->mac_addr; + } else { + return; + } + + h2c->w8 = le32_encode_bits(mld_sma[0], DCTLINFO_V2_W8_MLD_SMA_0) | + le32_encode_bits(mld_sma[1], DCTLINFO_V2_W8_MLD_SMA_1) | + le32_encode_bits(mld_sma[2], DCTLINFO_V2_W8_MLD_SMA_2) | + le32_encode_bits(mld_sma[3], DCTLINFO_V2_W8_MLD_SMA_3); + h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); + + h2c->w9 = le32_encode_bits(mld_sma[4], DCTLINFO_V2_W9_MLD_SMA_4) | + le32_encode_bits(mld_sma[5], DCTLINFO_V2_W9_MLD_SMA_5) | + le32_encode_bits(mld_tma[0], DCTLINFO_V2_W9_MLD_TMA_0) | + le32_encode_bits(mld_tma[1], DCTLINFO_V2_W9_MLD_TMA_1); + h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); + + h2c->w10 = le32_encode_bits(mld_tma[2], DCTLINFO_V2_W10_MLD_TMA_2) | + le32_encode_bits(mld_tma[3], DCTLINFO_V2_W10_MLD_TMA_3) | + le32_encode_bits(mld_tma[4], DCTLINFO_V2_W10_MLD_TMA_4) | + le32_encode_bits(mld_tma[5], DCTLINFO_V2_W10_MLD_TMA_5); + h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); + + h2c->w11 = le32_encode_bits(mld_bssid[0], DCTLINFO_V2_W11_MLD_BSSID_0) | + le32_encode_bits(mld_bssid[1], DCTLINFO_V2_W11_MLD_BSSID_1) | + le32_encode_bits(mld_bssid[2], DCTLINFO_V2_W11_MLD_BSSID_2) | + le32_encode_bits(mld_bssid[3], DCTLINFO_V2_W11_MLD_BSSID_3); + h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); + + h2c->w12 = le32_encode_bits(mld_bssid[4], DCTLINFO_V2_W12_MLD_BSSID_4) | + le32_encode_bits(mld_bssid[5], DCTLINFO_V2_W12_MLD_BSSID_5); + h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); } diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h index a6f72edd30fe..3134ebf08825 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.h +++ b/drivers/net/wireless/realtek/rtw89/cam.h @@ -514,16 +514,28 @@ struct rtw89_h2c_dctlinfo_ud_v2 { #define DCTLINFO_V2_W7_SEC_ENT7 GENMASK(23, 16) #define DCTLINFO_V2_W7_SEC_ENT8 GENMASK(31, 24) #define DCTLINFO_V2_W7_ALL GENMASK(31, 0) -#define DCTLINFO_V2_W8_MLD_SMA_L_V1 GENMASK(31, 0) +#define DCTLINFO_V2_W8_MLD_SMA_0 GENMASK(7, 0) +#define DCTLINFO_V2_W8_MLD_SMA_1 GENMASK(15, 8) +#define DCTLINFO_V2_W8_MLD_SMA_2 GENMASK(23, 16) +#define DCTLINFO_V2_W8_MLD_SMA_3 GENMASK(31, 24) #define DCTLINFO_V2_W8_ALL GENMASK(31, 0) -#define DCTLINFO_V2_W9_MLD_SMA_H_V1 GENMASK(15, 0) -#define DCTLINFO_V2_W9_MLD_TMA_L_V1 GENMASK(31, 16) +#define DCTLINFO_V2_W9_MLD_SMA_4 GENMASK(7, 0) +#define DCTLINFO_V2_W9_MLD_SMA_5 GENMASK(15, 8) +#define DCTLINFO_V2_W9_MLD_TMA_0 GENMASK(23, 16) +#define DCTLINFO_V2_W9_MLD_TMA_1 GENMASK(31, 24) #define DCTLINFO_V2_W9_ALL GENMASK(31, 0) -#define DCTLINFO_V2_W10_MLD_TMA_H_V1 GENMASK(31, 0) +#define DCTLINFO_V2_W10_MLD_TMA_2 GENMASK(7, 0) +#define DCTLINFO_V2_W10_MLD_TMA_3 GENMASK(15, 8) +#define DCTLINFO_V2_W10_MLD_TMA_4 GENMASK(23, 16) +#define 
DCTLINFO_V2_W10_MLD_TMA_5 GENMASK(31, 24) #define DCTLINFO_V2_W10_ALL GENMASK(31, 0) -#define DCTLINFO_V2_W11_MLD_TA_BSSID_L_V1 GENMASK(31, 0) +#define DCTLINFO_V2_W11_MLD_BSSID_0 GENMASK(7, 0) +#define DCTLINFO_V2_W11_MLD_BSSID_1 GENMASK(15, 8) +#define DCTLINFO_V2_W11_MLD_BSSID_2 GENMASK(23, 16) +#define DCTLINFO_V2_W11_MLD_BSSID_3 GENMASK(31, 24) #define DCTLINFO_V2_W11_ALL GENMASK(31, 0) -#define DCTLINFO_V2_W12_MLD_TA_BSSID_H_V1 GENMASK(15, 0) +#define DCTLINFO_V2_W12_MLD_BSSID_4 GENMASK(7, 0) +#define DCTLINFO_V2_W12_MLD_BSSID_5 GENMASK(15, 8) #define DCTLINFO_V2_W12_ALL GENMASK(15, 0) int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif); diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c index ba6332da8019..fb9449930c40 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.c +++ b/drivers/net/wireless/realtek/rtw89/chan.c @@ -10,6 +10,10 @@ #include "ps.h" #include "util.h" +static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_idx idx1, + enum rtw89_chanctx_idx idx2); + static enum rtw89_subband rtw89_get_subband_type(enum rtw89_band band, u8 center_chan) { @@ -226,11 +230,15 @@ static void rtw89_config_default_chandef(struct rtw89_dev *rtwdev) void rtw89_entity_init(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt; hal->entity_pause = false; bitmap_zero(hal->entity_map, NUM_OF_RTW89_CHANCTX); bitmap_zero(hal->changes, NUM_OF_RTW89_CHANCTX_CHANGES); atomic_set(&hal->roc_chanctx_idx, RTW89_CHANCTX_IDLE); + + INIT_LIST_HEAD(&mgnt->active_list); + rtw89_config_default_chandef(rtwdev); } @@ -272,6 +280,142 @@ static void rtw89_entity_calculate_weight(struct rtw89_dev *rtwdev, } } +static void rtw89_normalize_link_chanctx(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link) +{ + struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; + struct rtw89_vif_link *cur; + + if (unlikely(!rtwvif_link->chanctx_assigned)) + return; + + cur = rtw89_vif_get_link_inst(rtwvif, 0); + if (!cur || !cur->chanctx_assigned) + return; + + if (cur == rtwvif_link) + return; + + rtw89_swap_chanctx(rtwdev, rtwvif_link->chanctx_idx, cur->chanctx_idx); +} + +const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev, + const char *caller_message, + u8 link_index) +{ + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt; + enum rtw89_chanctx_idx chanctx_idx; + enum rtw89_chanctx_idx roc_idx; + enum rtw89_entity_mode mode; + u8 role_index; + + lockdep_assert_held(&rtwdev->mutex); + + if (unlikely(link_index >= __RTW89_MLD_MAX_LINK_NUM)) { + WARN(1, "link index %u is invalid (max link inst num: %d)\n", + link_index, __RTW89_MLD_MAX_LINK_NUM); + goto dflt; + } + + mode = rtw89_get_entity_mode(rtwdev); + switch (mode) { + case RTW89_ENTITY_MODE_SCC_OR_SMLD: + case RTW89_ENTITY_MODE_MCC: + role_index = 0; + break; + case RTW89_ENTITY_MODE_MCC_PREPARE: + role_index = 1; + break; + default: + WARN(1, "Invalid ent mode: %d\n", mode); + goto dflt; + } + + chanctx_idx = mgnt->chanctx_tbl[role_index][link_index]; + if (chanctx_idx == RTW89_CHANCTX_IDLE) + goto dflt; + + roc_idx = atomic_read(&hal->roc_chanctx_idx); + if (roc_idx != RTW89_CHANCTX_IDLE) { + /* ROC is ongoing (given ROC runs on RTW89_ROC_BY_LINK_INDEX). + * If @link_index is the same as RTW89_ROC_BY_LINK_INDEX, get + * the ongoing ROC chanctx. 
+ */ + if (link_index == RTW89_ROC_BY_LINK_INDEX) + chanctx_idx = roc_idx; + } + + return rtw89_chan_get(rtwdev, chanctx_idx); + +dflt: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "%s (%s): prefetch NULL on link index %u\n", + __func__, caller_message ?: "", link_index); + + return rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); +} +EXPORT_SYMBOL(__rtw89_mgnt_chan_get); + +static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev) +{ + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt; + struct rtw89_vif_link *link; + struct rtw89_vif *role; + u8 pos = 0; + int i, j; + + lockdep_assert_held(&rtwdev->mutex); + + for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++) + mgnt->active_roles[i] = NULL; + + for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++) { + for (j = 0; j < __RTW89_MLD_MAX_LINK_NUM; j++) + mgnt->chanctx_tbl[i][j] = RTW89_CHANCTX_IDLE; + } + + /* To be consistent with legacy behavior, expect the first active role + * which uses RTW89_CHANCTX_0 to put at position 0, and make its first + * link instance take RTW89_CHANCTX_0. (normalizing) + */ + list_for_each_entry(role, &mgnt->active_list, mgnt_entry) { + for (i = 0; i < role->links_inst_valid_num; i++) { + link = rtw89_vif_get_link_inst(role, i); + if (!link || !link->chanctx_assigned) + continue; + + if (link->chanctx_idx == RTW89_CHANCTX_0) { + rtw89_normalize_link_chanctx(rtwdev, link); + + list_del(&role->mgnt_entry); + list_add(&role->mgnt_entry, &mgnt->active_list); + break; + } + } + } + + list_for_each_entry(role, &mgnt->active_list, mgnt_entry) { + if (unlikely(pos >= RTW89_MAX_INTERFACE_NUM)) { + rtw89_warn(rtwdev, + "%s: active roles are over max iface num\n", + __func__); + break; + } + + for (i = 0; i < role->links_inst_valid_num; i++) { + link = rtw89_vif_get_link_inst(role, i); + if (!link || !link->chanctx_assigned) + continue; + + mgnt->chanctx_tbl[pos][i] = link->chanctx_idx; + } + + mgnt->active_roles[pos++] = role; + } +} + enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) { DECLARE_BITMAP(recalc_map, NUM_OF_RTW89_CHANCTX) = {}; @@ -298,9 +442,14 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) set_bit(RTW89_CHANCTX_0, recalc_map); fallthrough; case 1: - mode = RTW89_ENTITY_MODE_SCC; + mode = RTW89_ENTITY_MODE_SCC_OR_SMLD; break; case 2 ... 
NUM_OF_RTW89_CHANCTX: + if (w.active_roles == 1) { + mode = RTW89_ENTITY_MODE_SCC_OR_SMLD; + break; + } + if (w.active_roles != NUM_OF_RTW89_MCC_ROLES) { rtw89_debug(rtwdev, RTW89_DBG_CHAN, "unhandled ent: %d chanctxs %d roles\n", @@ -327,6 +476,8 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) rtw89_assign_entity_chan(rtwdev, idx, &chan); } + rtw89_entity_recalc_mgnt_roles(rtwdev); + if (hal->entity_pause) return rtw89_get_entity_mode(rtwdev); @@ -716,6 +867,7 @@ struct rtw89_mcc_fill_role_selector { }; static_assert((u8)NUM_OF_RTW89_CHANCTX >= NUM_OF_RTW89_MCC_ROLES); +static_assert(RTW89_MAX_INTERFACE_NUM >= NUM_OF_RTW89_MCC_ROLES); static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev, struct rtw89_mcc_role *mcc_role, @@ -745,14 +897,18 @@ static int rtw89_mcc_fill_role_iterator(struct rtw89_dev *rtwdev, static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev) { + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt; struct rtw89_mcc_fill_role_selector sel = {}; struct rtw89_vif_link *rtwvif_link; struct rtw89_vif *rtwvif; int ret; + int i; - rtw89_for_each_rtwvif(rtwdev, rtwvif) { - if (!rtw89_vif_is_active_role(rtwvif)) - continue; + for (i = 0; i < NUM_OF_RTW89_MCC_ROLES; i++) { + rtwvif = mgnt->active_roles[i]; + if (!rtwvif) + break; rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); if (unlikely(!rtwvif_link)) { @@ -760,14 +916,7 @@ static int rtw89_mcc_fill_all_roles(struct rtw89_dev *rtwdev) continue; } - if (sel.bind_vif[rtwvif_link->chanctx_idx]) { - rtw89_warn(rtwdev, - "MCC skip extra vif <macid %d> on chanctx[%d]\n", - rtwvif_link->mac_id, rtwvif_link->chanctx_idx); - continue; - } - - sel.bind_vif[rtwvif_link->chanctx_idx] = rtwvif_link; + sel.bind_vif[i] = rtwvif_link; } ret = rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_fill_role_iterator, &sel); @@ -2501,12 +2650,18 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev, struct ieee80211_chanctx_conf *ctx) { struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv; + struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt; struct rtw89_entity_weight w = {}; rtwvif_link->chanctx_idx = cfg->idx; rtwvif_link->chanctx_assigned = true; cfg->ref_count++; + if (list_empty(&rtwvif->mgnt_entry)) + list_add_tail(&rtwvif->mgnt_entry, &mgnt->active_list); + if (cfg->idx == RTW89_CHANCTX_0) goto out; @@ -2526,6 +2681,7 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev, struct ieee80211_chanctx_conf *ctx) { struct rtw89_chanctx_cfg *cfg = (struct rtw89_chanctx_cfg *)ctx->drv_priv; + struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; struct rtw89_hal *hal = &rtwdev->hal; enum rtw89_chanctx_idx roll; enum rtw89_entity_mode cur; @@ -2536,6 +2692,9 @@ void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev, rtwvif_link->chanctx_assigned = false; cfg->ref_count--; + if (!rtw89_vif_is_active_role(rtwvif)) + list_del_init(&rtwvif->mgnt_entry); + if (cfg->ref_count != 0) goto out; diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h index 74de13a2e7da..2eb31dff2083 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.h +++ b/drivers/net/wireless/realtek/rtw89/chan.h @@ -101,6 +101,14 @@ void rtw89_chanctx_track(struct rtw89_dev *rtwdev); void rtw89_chanctx_pause(struct rtw89_dev *rtwdev, enum rtw89_chanctx_pause_reasons rsn); void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev); + +const struct 
rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev, + const char *caller_message, + u8 link_index); + +#define rtw89_mgnt_chan_get(rtwdev, link_index) \ + __rtw89_mgnt_chan_get(rtwdev, __func__, link_index) + int rtw89_chanctx_ops_add(struct rtw89_dev *rtwdev, struct ieee80211_chanctx_conf *ctx); void rtw89_chanctx_ops_remove(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index b60c8bd4537b..68316d44b204 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -2507,6 +2507,8 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev) if (ver->fcxmreg == 7) { sz = struct_size(v7, regs, n); v7 = kmalloc(sz, GFP_KERNEL); + if (!v7) + return; v7->type = RPT_EN_MREG; v7->fver = ver->fcxmreg; v7->len = n; @@ -2521,6 +2523,8 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev) } else { sz = struct_size(v1, regs, n); v1 = kmalloc(sz, GFP_KERNEL); + if (!v1) + return; v1->fver = ver->fcxmreg; v1->reg_num = n; memcpy(v1->regs, chip->mon_reg, flex_array_size(v1, regs, n)); @@ -3695,6 +3699,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type) struct rtw89_btc_dm *dm = &btc->dm; struct rtw89_btc_fbtc_tdma *t = &dm->tdma; struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1; + struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc; struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc; struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc; struct rtw89_btc_wl_info *wl = &btc->cx.wl; @@ -3853,7 +3858,10 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type) s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype); break; case BTC_CXP_OFFE_2GBWMIXB: - _slot_set(btc, CXST_E2G, 0, 0xea5a5555, SLOT_MIX); + if (a2dp->exist) + _slot_set(btc, CXST_E2G, 0, cxtbl[2], SLOT_MIX); + else + _slot_set(btc, CXST_E2G, 0, tbl_w1, SLOT_MIX); _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur, s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype); break; diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index bba5bde95bb4..e5b2968c1431 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -192,13 +192,13 @@ static const struct ieee80211_iface_combination rtw89_iface_combs[] = { { .limits = rtw89_iface_limits, .n_limits = ARRAY_SIZE(rtw89_iface_limits), - .max_interfaces = 2, + .max_interfaces = RTW89_MAX_INTERFACE_NUM, .num_different_channels = 1, }, { .limits = rtw89_iface_limits_mcc, .n_limits = ARRAY_SIZE(rtw89_iface_limits_mcc), - .max_interfaces = 2, + .max_interfaces = RTW89_MAX_INTERFACE_NUM, .num_different_channels = 2, }, }; @@ -341,84 +341,47 @@ void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, rtw89_chan_create(chan, center_chan, channel->hw_value, band, bandwidth); } -void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev) +static void __rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) { - struct rtw89_hal *hal = &rtwdev->hal; const struct rtw89_chip_info *chip = rtwdev->chip; - const struct rtw89_chan *chan; - enum rtw89_chanctx_idx chanctx_idx; - enum rtw89_chanctx_idx roc_idx; - enum rtw89_phy_idx phy_idx; - enum rtw89_entity_mode mode; bool entity_active; - mode = rtw89_get_entity_mode(rtwdev); - switch (mode) { - case RTW89_ENTITY_MODE_SCC: - case RTW89_ENTITY_MODE_MCC: - chanctx_idx = 
RTW89_CHANCTX_0; - break; - case RTW89_ENTITY_MODE_MCC_PREPARE: - chanctx_idx = RTW89_CHANCTX_1; - break; - default: - WARN(1, "Invalid ent mode: %d\n", mode); + entity_active = rtw89_get_entity_state(rtwdev, phy_idx); + if (!entity_active) return; - } - roc_idx = atomic_read(&hal->roc_chanctx_idx); - if (roc_idx != RTW89_CHANCTX_IDLE) - chanctx_idx = roc_idx; + chip->ops->set_txpwr(rtwdev, chan, phy_idx); +} - phy_idx = RTW89_PHY_0; +void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chan *chan; - entity_active = rtw89_get_entity_state(rtwdev, phy_idx); - if (!entity_active) + chan = rtw89_mgnt_chan_get(rtwdev, 0); + __rtw89_core_set_chip_txpwr(rtwdev, chan, RTW89_PHY_0); + + if (!rtwdev->support_mlo) return; - chan = rtw89_chan_get(rtwdev, chanctx_idx); - chip->ops->set_txpwr(rtwdev, chan, phy_idx); + chan = rtw89_mgnt_chan_get(rtwdev, 1); + __rtw89_core_set_chip_txpwr(rtwdev, chan, RTW89_PHY_1); } -int rtw89_set_channel(struct rtw89_dev *rtwdev) +static void __rtw89_set_channel(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_mac_idx mac_idx, + enum rtw89_phy_idx phy_idx) { - struct rtw89_hal *hal = &rtwdev->hal; const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_chan_rcd *chan_rcd; - const struct rtw89_chan *chan; - enum rtw89_chanctx_idx chanctx_idx; - enum rtw89_chanctx_idx roc_idx; - enum rtw89_mac_idx mac_idx; - enum rtw89_phy_idx phy_idx; struct rtw89_channel_help_params bak; - enum rtw89_entity_mode mode; bool entity_active; - mode = rtw89_entity_recalc(rtwdev); - switch (mode) { - case RTW89_ENTITY_MODE_SCC: - case RTW89_ENTITY_MODE_MCC: - chanctx_idx = RTW89_CHANCTX_0; - break; - case RTW89_ENTITY_MODE_MCC_PREPARE: - chanctx_idx = RTW89_CHANCTX_1; - break; - default: - WARN(1, "Invalid ent mode: %d\n", mode); - return -EINVAL; - } - - roc_idx = atomic_read(&hal->roc_chanctx_idx); - if (roc_idx != RTW89_CHANCTX_IDLE) - chanctx_idx = roc_idx; - - mac_idx = RTW89_MAC_0; - phy_idx = RTW89_PHY_0; - entity_active = rtw89_get_entity_state(rtwdev, phy_idx); - chan = rtw89_chan_get(rtwdev, chanctx_idx); - chan_rcd = rtw89_chan_rcd_get(rtwdev, chanctx_idx); + chan_rcd = rtw89_chan_rcd_get_by_chan(chan); rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx); @@ -434,6 +397,28 @@ int rtw89_set_channel(struct rtw89_dev *rtwdev) } rtw89_set_entity_state(rtwdev, phy_idx, true); +} + +int rtw89_set_channel(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chan *chan; + enum rtw89_entity_mode mode; + + mode = rtw89_entity_recalc(rtwdev); + if (mode < 0 || mode >= NUM_OF_RTW89_ENTITY_MODE) { + WARN(1, "Invalid ent mode: %d\n", mode); + return -EINVAL; + } + + chan = rtw89_mgnt_chan_get(rtwdev, 0); + __rtw89_set_channel(rtwdev, chan, RTW89_MAC_0, RTW89_PHY_0); + + if (!rtwdev->support_mlo) + return 0; + + chan = rtw89_mgnt_chan_get(rtwdev, 1); + __rtw89_set_channel(rtwdev, chan, RTW89_MAC_1, RTW89_PHY_1); + return 0; } @@ -939,8 +924,10 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev, struct sk_buff *skb = tx_req->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; + struct rtw89_addr_cam_entry *addr_cam; enum rtw89_core_tx_type tx_type; enum btc_pkt_type pkt_type; + bool upd_wlan_hdr = false; bool is_bmc; u16 seq; @@ -948,6 +935,11 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev, if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) { tx_type = rtw89_core_get_tx_type(rtwdev, skb); tx_req->tx_type = tx_type; + + addr_cam = 
rtw89_get_addr_cam_of(tx_req->rtwvif_link, + tx_req->rtwsta_link); + if (addr_cam->valid) + upd_wlan_hdr = true; } is_bmc = (is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1)); @@ -957,6 +949,7 @@ rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev, desc_info->is_bmc = is_bmc; desc_info->wd_page = true; desc_info->hiq = info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM; + desc_info->upd_wlan_hdr = upd_wlan_hdr; switch (tx_req->tx_type) { case RTW89_CORE_TX_TYPE_MGMT: @@ -1355,6 +1348,13 @@ static __le32 rtw89_build_txwd_body5_v2(struct rtw89_tx_desc_info *desc_info) return cpu_to_le32(dword); } +static __le32 rtw89_build_txwd_body6_v2(struct rtw89_tx_desc_info *desc_info) +{ + u32 dword = FIELD_PREP(BE_TXD_BODY6_UPD_WLAN_HDR, desc_info->upd_wlan_hdr); + + return cpu_to_le32(dword); +} + static __le32 rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(BE_TXD_BODY7_USERATE_SEL, desc_info->use_rate) | @@ -1418,6 +1418,7 @@ void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev, txwd_body->dword4 = rtw89_build_txwd_body4_v2(desc_info); txwd_body->dword5 = rtw89_build_txwd_body5_v2(desc_info); } + txwd_body->dword6 = rtw89_build_txwd_body6_v2(desc_info); txwd_body->dword7 = rtw89_build_txwd_body7_v2(desc_info); if (!desc_info->en_wd_info) @@ -1858,32 +1859,58 @@ static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev, phy_ppdu); } -static u8 rtw89_rxdesc_to_nl_he_eht_gi(struct rtw89_dev *rtwdev, - u8 desc_info_gi, - bool rx_status, bool eht) +static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev, + u8 desc_info_gi, + bool rx_status) { switch (desc_info_gi) { case RTW89_GILTF_SGI_4XHE08: case RTW89_GILTF_2XHE08: case RTW89_GILTF_1XHE08: - return eht ? NL80211_RATE_INFO_EHT_GI_0_8 : - NL80211_RATE_INFO_HE_GI_0_8; + return NL80211_RATE_INFO_HE_GI_0_8; case RTW89_GILTF_2XHE16: case RTW89_GILTF_1XHE16: - return eht ? NL80211_RATE_INFO_EHT_GI_1_6 : - NL80211_RATE_INFO_HE_GI_1_6; + return NL80211_RATE_INFO_HE_GI_1_6; case RTW89_GILTF_LGI_4XHE32: - return eht ? NL80211_RATE_INFO_EHT_GI_3_2 : - NL80211_RATE_INFO_HE_GI_3_2; + return NL80211_RATE_INFO_HE_GI_3_2; default: rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info_gi); if (rx_status) - return eht ? NL80211_RATE_INFO_EHT_GI_3_2 : - NL80211_RATE_INFO_HE_GI_3_2; + return NL80211_RATE_INFO_HE_GI_3_2; return U8_MAX; } } +static u8 rtw89_rxdesc_to_nl_eht_gi(struct rtw89_dev *rtwdev, + u8 desc_info_gi, + bool rx_status) +{ + switch (desc_info_gi) { + case RTW89_GILTF_SGI_4XHE08: + case RTW89_GILTF_2XHE08: + case RTW89_GILTF_1XHE08: + return NL80211_RATE_INFO_EHT_GI_0_8; + case RTW89_GILTF_2XHE16: + case RTW89_GILTF_1XHE16: + return NL80211_RATE_INFO_EHT_GI_1_6; + case RTW89_GILTF_LGI_4XHE32: + return NL80211_RATE_INFO_EHT_GI_3_2; + default: + rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info_gi); + if (rx_status) + return NL80211_RATE_INFO_EHT_GI_3_2; + return U8_MAX; + } +} + +static u8 rtw89_rxdesc_to_nl_he_eht_gi(struct rtw89_dev *rtwdev, + u8 desc_info_gi, + bool rx_status, bool eht) +{ + return eht ? 
rtw89_rxdesc_to_nl_eht_gi(rtwdev, desc_info_gi, rx_status) : + rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info_gi, rx_status); +} + static bool rtw89_check_rx_statu_gi_match(struct ieee80211_rx_status *status, u8 gi_ltf, bool eht) @@ -3162,9 +3189,10 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_leave_ips_by_hwflags(rtwdev); rtw89_leave_lps(rtwdev); - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); + rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX); if (unlikely(!rtwvif_link)) { - rtw89_err(rtwdev, "roc start: find no link on HW-0\n"); + rtw89_err(rtwdev, "roc start: find no link on HW-%u\n", + RTW89_ROC_BY_LINK_INDEX); return; } @@ -3217,9 +3245,10 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_leave_ips_by_hwflags(rtwdev); rtw89_leave_lps(rtwdev); - rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); + rtwvif_link = rtw89_vif_get_link_inst(rtwvif, RTW89_ROC_BY_LINK_INDEX); if (unlikely(!rtwvif_link)) { - rtw89_err(rtwdev, "roc end: find no link on HW-0\n"); + rtw89_err(rtwdev, "roc end: find no link on HW-%u\n", + RTW89_ROC_BY_LINK_INDEX); return; } @@ -4818,11 +4847,20 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev) static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev) { + const struct rtw89_chip_info *chip = rtwdev->chip; + rtwdev->hal.support_cckpd = !(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) && !(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV); rtwdev->hal.support_igi = rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV; + + if (test_bit(RTW89_QUIRK_THERMAL_PROT_120C, rtwdev->quirks)) + rtwdev->hal.thermal_prot_th = chip->thermal_th[1]; + else if (test_bit(RTW89_QUIRK_THERMAL_PROT_110C, rtwdev->quirks)) + rtwdev->hal.thermal_prot_th = chip->thermal_th[0]; + else + rtwdev->hal.thermal_prot_th = 0; } static void rtw89_core_setup_rfe_parms(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 38df161bfd35..5ad32eacd0d5 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -1162,6 +1162,7 @@ struct rtw89_tx_desc_info { bool er_cap; bool stbc; bool ldpc; + bool upd_wlan_hdr; }; struct rtw89_core_tx_request { @@ -3427,6 +3428,8 @@ enum rtw89_roc_state { RTW89_ROC_MGMT, }; +#define RTW89_ROC_BY_LINK_INDEX 0 + struct rtw89_roc { struct ieee80211_channel chan; struct delayed_work roc_work; @@ -4244,6 +4247,7 @@ struct rtw89_chip_info { u8 wde_qempty_acq_grpnum; u8 wde_qempty_mgq_grpsel; u32 rf_base_addr[2]; + u8 thermal_th[2]; u8 support_macid_num; u8 support_link_num; u8 support_chanctx_num; @@ -4512,11 +4516,14 @@ enum rtw89_fw_mss_dev_type { }; struct rtw89_fw_secure { - bool secure_boot; + bool secure_boot: 1; + bool can_mss_v1: 1; + bool can_mss_v0: 1; u32 sb_sel_mgn; u8 mss_dev_type; u8 mss_cust_idx; u8 mss_key_num; + u8 mss_idx; /* v0 */ }; struct rtw89_fw_info { @@ -4562,7 +4569,7 @@ enum rtw89_sar_subband { RTW89_SAR_2GHZ_SUBBAND, RTW89_SAR_5GHZ_SUBBAND_1_2, /* U-NII-1 and U-NII-2 */ RTW89_SAR_5GHZ_SUBBAND_2_E, /* U-NII-2-Extended */ - RTW89_SAR_5GHZ_SUBBAND_3, /* U-NII-3 */ + RTW89_SAR_5GHZ_SUBBAND_3_4, /* U-NII-3 and U-NII-4 */ RTW89_SAR_6GHZ_SUBBAND_5_L, /* U-NII-5 lower part */ RTW89_SAR_6GHZ_SUBBAND_5_H, /* U-NII-5 higher part */ RTW89_SAR_6GHZ_SUBBAND_6, /* U-NII-6 */ @@ -4624,7 +4631,7 @@ enum rtw89_chanctx_changes { }; enum rtw89_entity_mode { - RTW89_ENTITY_MODE_SCC, + 
RTW89_ENTITY_MODE_SCC_OR_SMLD, RTW89_ENTITY_MODE_MCC_PREPARE, RTW89_ENTITY_MODE_MCC, @@ -4633,6 +4640,16 @@ enum rtw89_entity_mode { RTW89_ENTITY_MODE_UNHANDLED = -ESRCH, }; +#define RTW89_MAX_INTERFACE_NUM 2 + +/* only valid when running with chanctx_ops */ +struct rtw89_entity_mgnt { + struct list_head active_list; + struct rtw89_vif *active_roles[RTW89_MAX_INTERFACE_NUM]; + enum rtw89_chanctx_idx chanctx_tbl[RTW89_MAX_INTERFACE_NUM] + [__RTW89_MLD_MAX_LINK_NUM]; +}; + struct rtw89_chanctx { struct cfg80211_chan_def chandef; struct rtw89_chan chan; @@ -4651,8 +4668,12 @@ struct rtw89_edcca_bak { enum rtw89_dm_type { RTW89_DM_DYNAMIC_EDCCA, + RTW89_DM_THERMAL_PROTECT, }; +#define RTW89_THERMAL_PROT_LV_MAX 5 +#define RTW89_THERMAL_PROT_STEP 19 /* -19% for each level */ + struct rtw89_hal { u32 rx_fltr; u8 cv; @@ -4676,9 +4697,13 @@ struct rtw89_hal { bool entity_active[RTW89_PHY_MAX]; bool entity_pause; enum rtw89_entity_mode entity_mode; + struct rtw89_entity_mgnt entity_mgnt; struct rtw89_edcca_bak edcca_bak; u32 disabled_dm_bitmap; /* bitmap of enum rtw89_dm_type */ + + u8 thermal_prot_th; + u8 thermal_prot_lv; /* 0 ~ RTW89_THERMAL_PROT_LV_MAX */ }; #define RTW89_MAX_MAC_ID_NUM 128 @@ -4711,10 +4736,22 @@ enum rtw89_flags { enum rtw89_quirks { RTW89_QUIRK_PCI_BER, + RTW89_QUIRK_THERMAL_PROT_120C, + RTW89_QUIRK_THERMAL_PROT_110C, NUM_OF_RTW89_QUIRKS, }; +enum rtw89_custid { + RTW89_CUSTID_NONE, + RTW89_CUSTID_ACER, + RTW89_CUSTID_AMD, + RTW89_CUSTID_ASUS, + RTW89_CUSTID_DELL, + RTW89_CUSTID_HP, + RTW89_CUSTID_LENOVO, +}; + enum rtw89_pkt_drop_sel { RTW89_PKT_DROP_SEL_MACID_BE_ONCE, RTW89_PKT_DROP_SEL_MACID_BK_ONCE, @@ -4751,6 +4788,7 @@ DECLARE_EWMA(thermal, 4, 4); struct rtw89_phy_stat { struct ewma_thermal avg_thermal[RF_PATH_MAX]; + u8 last_thermal_max; struct ewma_rssi bcn_rssi; struct rtw89_pkt_stat cur_pkt_stat; struct rtw89_pkt_stat last_pkt_stat; @@ -4793,13 +4831,17 @@ enum rtw89_rfk_chs_nrs { RTW89_RFK_CHS_NR = __RTW89_RFK_CHS_NR_V1, }; -struct rtw89_rfk_mcc_info { +struct rtw89_rfk_mcc_info_data { u8 ch[RTW89_RFK_CHS_NR]; u8 band[RTW89_RFK_CHS_NR]; u8 bw[RTW89_RFK_CHS_NR]; u8 table_idx; }; +struct rtw89_rfk_mcc_info { + struct rtw89_rfk_mcc_info_data data[2]; +}; + #define RTW89_IQK_CHS_NR 2 #define RTW89_IQK_PATH_NR 4 @@ -4904,6 +4946,7 @@ struct rtw89_agc_gaincode_set { #define IGI_RSSI_TH_NUM 5 #define FA_TH_NUM 4 +#define TIA_LNA_OP1DB_NUM 8 #define LNA_GAIN_NUM 7 #define TIA_GAIN_NUM 2 struct rtw89_dig_info { @@ -5511,6 +5554,7 @@ struct rtw89_dev { struct rtw89_efuse efuse; struct rtw89_traffic_stats stats; struct rtw89_rfe_data *rfe_data; + enum rtw89_custid custid; /* ensures exclusive access from mac80211 callbacks */ struct mutex mutex; @@ -5613,6 +5657,7 @@ struct rtw89_dev { struct rtw89_vif { struct rtw89_dev *rtwdev; struct list_head list; + struct list_head mgnt_entry; u8 mac_addr[ETH_ALEN]; __be32 ip_addr; @@ -6368,6 +6413,15 @@ const struct rtw89_chan_rcd *rtw89_chan_rcd_get(struct rtw89_dev *rtwdev, } static inline +const struct rtw89_chan_rcd *rtw89_chan_rcd_get_by_chan(const struct rtw89_chan *chan) +{ + const struct rtw89_chanctx *chanctx = + container_of_const(chan, struct rtw89_chanctx, chan); + + return &chanctx->rcd; +} + +static inline const struct rtw89_chan *rtw89_scan_chan_get(struct rtw89_dev *rtwdev) { struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index 74b0c722a5b8..6abd88fa80ba 100644 --- 
a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -3672,14 +3672,19 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v) struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat; const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_rx_rate_cnt_info *info; + struct rtw89_hal *hal = &rtwdev->hal; enum rtw89_hw_rate first_rate; u8 rssi; int i; rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi); - seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d), RX: %u [%u] Mbps (lv: %d)\n", - stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv, + seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d", + stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv); + if (hal->thermal_prot_lv) + seq_printf(m, ", duty: %d%%", + 100 - hal->thermal_prot_lv * RTW89_THERMAL_PROT_STEP); + seq_printf(m, "), RX: %u [%u] Mbps (lv: %d)\n", stats->rx_throughput, stats->rx_throughput_raw, stats->rx_tfc_lv); seq_printf(m, "Beacon: %u (%d dBm), TF: %u\n", pkt_stat->beacon_nr, RTW89_RSSI_RAW_TO_DBM(rssi), stats->rx_tf_periodic); @@ -3886,6 +3891,7 @@ static const struct rtw89_disabled_dm_info { const char *name; } rtw89_disabled_dm_infos[] = { DM_INFO(DYNAMIC_EDCCA), + DM_INFO(THERMAL_PROTECT), }; static int diff --git a/drivers/net/wireless/realtek/rtw89/efuse.c b/drivers/net/wireless/realtek/rtw89/efuse.c index e1236079a84a..6c6c763510af 100644 --- a/drivers/net/wireless/realtek/rtw89/efuse.c +++ b/drivers/net/wireless/realtek/rtw89/efuse.c @@ -11,11 +11,38 @@ #define EF_CV_MASK GENMASK(7, 4) #define EF_CV_INV 15 +#define EFUSE_B1_MSSDEVTYPE_MASK GENMASK(3, 0) +#define EFUSE_B1_MSSCUSTIDX0_MASK GENMASK(7, 4) +#define EFUSE_B2_MSSKEYNUM_MASK GENMASK(3, 0) +#define EFUSE_B2_MSSCUSTIDX1_MASK BIT(6) + +#define EFUSE_EXTERNALPN_ADDR_AX 0x5EC +#define EFUSE_CUSTOMER_ADDR_AX 0x5ED +#define EFUSE_SERIALNUM_ADDR_AX 0x5ED + +#define EFUSE_B1_EXTERNALPN_MASK GENMASK(7, 0) +#define EFUSE_B2_CUSTOMER_MASK GENMASK(3, 0) +#define EFUSE_B2_SERIALNUM_MASK GENMASK(6, 4) + +#define OTP_KEY_INFO_NUM 2 + +static const u8 otp_key_info_externalPN[OTP_KEY_INFO_NUM] = {0x0, 0x0}; +static const u8 otp_key_info_customer[OTP_KEY_INFO_NUM] = {0x0, 0x1}; +static const u8 otp_key_info_serialNum[OTP_KEY_INFO_NUM] = {0x0, 0x1}; + enum rtw89_efuse_bank { RTW89_EFUSE_BANK_WIFI, RTW89_EFUSE_BANK_BT, }; +enum rtw89_efuse_mss_dev_type { + MSS_DEV_TYPE_FWSEC_DEF = 0xF, + MSS_DEV_TYPE_FWSEC_WINLIN_INBOX = 0xC, + MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB = 0xA, + MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB = 0x9, + MSS_DEV_TYPE_FWSEC_NONWIN_INBOX = 0x6, +}; + static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev, enum rtw89_efuse_bank bank) { @@ -354,3 +381,126 @@ int rtw89_read_efuse_ver(struct rtw89_dev *rtwdev, u8 *ecv) return 0; } EXPORT_SYMBOL(rtw89_read_efuse_ver); + +static u8 get_mss_dev_type_idx(struct rtw89_dev *rtwdev, u8 mss_dev_type) +{ + switch (mss_dev_type) { + case MSS_DEV_TYPE_FWSEC_WINLIN_INBOX: + mss_dev_type = 0x0; + break; + case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB: + mss_dev_type = 0x1; + break; + case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB: + mss_dev_type = 0x2; + break; + case MSS_DEV_TYPE_FWSEC_NONWIN_INBOX: + mss_dev_type = 0x3; + break; + case MSS_DEV_TYPE_FWSEC_DEF: + mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF; + break; + default: + rtw89_warn(rtwdev, "unknown mss_dev_type %d", mss_dev_type); + mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_INV; + break; + } + + return mss_dev_type; +} + +int rtw89_efuse_recognize_mss_info_v1(struct rtw89_dev 
*rtwdev, u8 b1, u8 b2) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_fw_secure *sec = &rtwdev->fw.sec; + u8 mss_dev_type; + + if (chip->chip_id == RTL8852B && b1 == 0xFF && b2 == 0x6E) { + mss_dev_type = MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB; + sec->mss_cust_idx = 0; + sec->mss_key_num = 0; + + goto mss_dev_type; + } + + mss_dev_type = u8_get_bits(b1, EFUSE_B1_MSSDEVTYPE_MASK); + sec->mss_cust_idx = 0x1F - (u8_get_bits(b1, EFUSE_B1_MSSCUSTIDX0_MASK) | + u8_get_bits(b2, EFUSE_B2_MSSCUSTIDX1_MASK) << 4); + sec->mss_key_num = 0xF - u8_get_bits(b2, EFUSE_B2_MSSKEYNUM_MASK); + +mss_dev_type: + sec->mss_dev_type = get_mss_dev_type_idx(rtwdev, mss_dev_type); + if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_INV) { + rtw89_warn(rtwdev, "invalid mss_dev_type %d\n", mss_dev_type); + return -ENOENT; + } + + sec->can_mss_v1 = true; + + return 0; +} + +static +int rtw89_efuse_recognize_mss_index_v0(struct rtw89_dev *rtwdev, u8 b1, u8 b2) +{ + struct rtw89_fw_secure *sec = &rtwdev->fw.sec; + u8 externalPN; + u8 serialNum; + u8 customer; + u8 i; + + externalPN = 0xFF - u8_get_bits(b1, EFUSE_B1_EXTERNALPN_MASK); + customer = 0xF - u8_get_bits(b2, EFUSE_B2_CUSTOMER_MASK); + serialNum = 0x7 - u8_get_bits(b2, EFUSE_B2_SERIALNUM_MASK); + + for (i = 0; i < OTP_KEY_INFO_NUM; i++) { + if (externalPN == otp_key_info_externalPN[i] && + customer == otp_key_info_customer[i] && + serialNum == otp_key_info_serialNum[i]) { + sec->mss_idx = i; + sec->can_mss_v0 = true; + return 0; + } + } + + return -ENOENT; +} + +int rtw89_efuse_read_fw_secure_ax(struct rtw89_dev *rtwdev) +{ + struct rtw89_fw_secure *sec = &rtwdev->fw.sec; + u32 sec_addr = EFUSE_EXTERNALPN_ADDR_AX; + u32 sec_size = 2; + u8 sec_map[2]; + u8 b1, b2; + int ret; + + ret = rtw89_dump_physical_efuse_map(rtwdev, sec_map, + sec_addr, sec_size, false); + if (ret) { + rtw89_warn(rtwdev, "failed to dump secsel map\n"); + return ret; + } + + b1 = sec_map[0]; + b2 = sec_map[1]; + + if (b1 == 0xFF && b2 == 0xFF) + return 0; + + rtw89_efuse_recognize_mss_index_v0(rtwdev, b1, b2); + rtw89_efuse_recognize_mss_info_v1(rtwdev, b1, b2); + if (!sec->can_mss_v1 && !sec->can_mss_v0) + goto out; + + sec->secure_boot = true; + +out: + rtw89_debug(rtwdev, RTW89_DBG_FW, + "MSS secure_boot=%d(%d/%d) dev_type=%d cust_idx=%d key_num=%d mss_index=%d\n", + sec->secure_boot, sec->can_mss_v0, sec->can_mss_v1, + sec->mss_dev_type, sec->mss_cust_idx, + sec->mss_key_num, sec->mss_idx); + + return 0; +} diff --git a/drivers/net/wireless/realtek/rtw89/efuse.h b/drivers/net/wireless/realtek/rtw89/efuse.h index 72416f56a071..a96fc1044791 100644 --- a/drivers/net/wireless/realtek/rtw89/efuse.h +++ b/drivers/net/wireless/realtek/rtw89/efuse.h @@ -23,6 +23,8 @@ int rtw89_parse_efuse_map_be(struct rtw89_dev *rtwdev); int rtw89_parse_phycap_map_be(struct rtw89_dev *rtwdev); int rtw89_cnv_efuse_state_be(struct rtw89_dev *rtwdev, bool idle); int rtw89_read_efuse_ver(struct rtw89_dev *rtwdev, u8 *efv); +int rtw89_efuse_recognize_mss_info_v1(struct rtw89_dev *rtwdev, u8 b1, u8 b2); +int rtw89_efuse_read_fw_secure_ax(struct rtw89_dev *rtwdev); int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev); #endif diff --git a/drivers/net/wireless/realtek/rtw89/efuse_be.c b/drivers/net/wireless/realtek/rtw89/efuse_be.c index 0be26d5fdf7c..64768923b0f0 100644 --- a/drivers/net/wireless/realtek/rtw89/efuse_be.c +++ b/drivers/net/wireless/realtek/rtw89/efuse_be.c @@ -8,11 +8,7 @@ #include "reg.h" #define EFUSE_EXTERNALPN_ADDR_BE 0x1580 -#define EFUSE_B1_MSSDEVTYPE_MASK 
GENMASK(3, 0) -#define EFUSE_B1_MSSCUSTIDX0_MASK GENMASK(7, 4) #define EFUSE_SERIALNUM_ADDR_BE 0x1581 -#define EFUSE_B2_MSSKEYNUM_MASK GENMASK(3, 0) -#define EFUSE_B2_MSSCUSTIDX1_MASK BIT(6) #define EFUSE_SB_CRYP_SEL_ADDR 0x1582 #define EFUSE_SB_CRYP_SEL_SIZE 2 #define EFUSE_SB_CRYP_SEL_DEFAULT 0xFFFF @@ -20,14 +16,6 @@ #define EFUSE_SEC_BE_START 0x1580 #define EFUSE_SEC_BE_SIZE 4 -enum rtw89_efuse_mss_dev_type { - MSS_DEV_TYPE_FWSEC_DEF = 0xF, - MSS_DEV_TYPE_FWSEC_WINLIN_INBOX = 0xC, - MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB = 0xA, - MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB = 0x9, - MSS_DEV_TYPE_FWSEC_NONWIN_INBOX = 0x6, -}; - static const u32 sb_sel_mgn[SB_SEL_MGN_MAX_SIZE] = { 0x8000100, 0xC000180 }; @@ -477,33 +465,6 @@ static u16 get_sb_cryp_sel_idx(u16 sb_cryp_sel) return sb_cryp_sel_v + low_bit; } -static u8 get_mss_dev_type_idx(struct rtw89_dev *rtwdev, u8 mss_dev_type) -{ - switch (mss_dev_type) { - case MSS_DEV_TYPE_FWSEC_WINLIN_INBOX: - mss_dev_type = 0x0; - break; - case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_NON_COB: - mss_dev_type = 0x1; - break; - case MSS_DEV_TYPE_FWSEC_NONLIN_INBOX_COB: - mss_dev_type = 0x2; - break; - case MSS_DEV_TYPE_FWSEC_NONWIN_INBOX: - mss_dev_type = 0x3; - break; - case MSS_DEV_TYPE_FWSEC_DEF: - mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF; - break; - default: - rtw89_warn(rtwdev, "unknown mss_dev_type %d", mss_dev_type); - mss_dev_type = RTW89_FW_MSS_DEV_TYPE_FWSEC_INV; - break; - } - - return mss_dev_type; -} - int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev) { struct rtw89_fw_secure *sec = &rtwdev->fw.sec; @@ -511,7 +472,6 @@ int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev) u32 sec_size = EFUSE_SEC_BE_SIZE; u16 sb_cryp_sel, sb_cryp_sel_idx; u8 sec_map[EFUSE_SEC_BE_SIZE]; - u8 mss_dev_type; u8 b1, b2; int ret; @@ -538,16 +498,9 @@ int rtw89_efuse_read_fw_secure_be(struct rtw89_dev *rtwdev) b1 = sec_map[EFUSE_EXTERNALPN_ADDR_BE - sec_addr]; b2 = sec_map[EFUSE_SERIALNUM_ADDR_BE - sec_addr]; - mss_dev_type = u8_get_bits(b1, EFUSE_B1_MSSDEVTYPE_MASK); - sec->mss_cust_idx = 0x1F - (u8_get_bits(b1, EFUSE_B1_MSSCUSTIDX0_MASK) | - u8_get_bits(b2, EFUSE_B2_MSSCUSTIDX1_MASK) << 4); - sec->mss_key_num = 0xF - u8_get_bits(b2, EFUSE_B2_MSSKEYNUM_MASK); - - sec->mss_dev_type = get_mss_dev_type_idx(rtwdev, mss_dev_type); - if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_INV) { - rtw89_warn(rtwdev, "invalid mss_dev_type %d\n", mss_dev_type); + ret = rtw89_efuse_recognize_mss_info_v1(rtwdev, b1, b2); + if (ret) goto out; - } sec->secure_boot = true; @@ -559,4 +512,3 @@ out: return 0; } -EXPORT_SYMBOL(rtw89_efuse_read_fw_secure_be); diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 2c2fdf21f065..2191c037d72e 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -56,6 +56,11 @@ static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb); static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, struct rtw89_wait_info *wait, unsigned int cond); +static int __parse_security_section(struct rtw89_dev *rtwdev, + struct rtw89_fw_bin_info *info, + struct rtw89_fw_hdr_section_info *section_info, + const void *content, + u32 *mssc_len); static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, bool header) @@ -124,13 +129,16 @@ static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 le struct rtw89_fw_bin_info *info) { const struct rtw89_fw_hdr *fw_hdr = (const struct 
rtw89_fw_hdr *)fw; + const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_fw_hdr_section_info *section_info; + struct rtw89_fw_secure *sec = &rtwdev->fw.sec; const struct rtw89_fw_dynhdr_hdr *fwdynhdr; const struct rtw89_fw_hdr_section *section; const u8 *fw_end = fw + len; const u8 *bin; u32 base_hdr_len; - u32 mssc_len = 0; + u32 mssc_len; + int ret; u32 i; if (!info) @@ -139,6 +147,7 @@ static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 le info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); base_hdr_len = struct_size(fw_hdr, sections, info->section_num); info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); + info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE); if (info->dynamic_hdr_en) { info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); @@ -161,26 +170,47 @@ static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 le section = &fw_hdr->sections[i]; section_info->type = le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); - if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { - section_info->mssc = - le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); - mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN; - } else { - section_info->mssc = 0; - } - section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); + if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) section_info->len += FWDL_SECTION_CHKSUM_LEN; section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); section_info->dladdr = le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; section_info->addr = bin; - bin += section_info->len; + + if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { + section_info->mssc = + le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); + + ret = __parse_security_section(rtwdev, info, section_info, + bin, &mssc_len); + if (ret) + return ret; + + if (sec->secure_boot && chip->chip_id == RTL8852B) + section_info->len_override = 960; + } else { + section_info->mssc = 0; + mssc_len = 0; + } + + rtw89_debug(rtwdev, RTW89_DBG_FW, + "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", + i, section_info->type, section_info->len, + section_info->mssc, mssc_len, bin - fw); + rtw89_debug(rtwdev, RTW89_DBG_FW, + " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", + section_info->ignore, section_info->key_addr, + section_info->key_addr ? 
+ section_info->key_addr - section_info->addr : 0, + section_info->key_len, section_info->key_idx); + + bin += section_info->len + mssc_len; section_info++; } - if (fw_end != bin + mssc_len) { + if (fw_end != bin) { rtw89_err(rtwdev, "[ERR]fw bin size\n"); return -EINVAL; } @@ -235,7 +265,6 @@ static int __get_mssc_key_idx(struct rtw89_dev *rtwdev, static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, struct rtw89_fw_bin_info *info, struct rtw89_fw_hdr_section_info *section_info, - const struct rtw89_fw_hdr_section_v1 *section, const void *content, u32 *mssc_len) { @@ -318,18 +347,15 @@ ignore: static int __parse_security_section(struct rtw89_dev *rtwdev, struct rtw89_fw_bin_info *info, struct rtw89_fw_hdr_section_info *section_info, - const struct rtw89_fw_hdr_section_v1 *section, const void *content, u32 *mssc_len) { + struct rtw89_fw_secure *sec = &rtwdev->fw.sec; int ret; - section_info->mssc = - le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); - - if (section_info->mssc == FORMATTED_MSSC) { + if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) { ret = __parse_formatted_mssc(rtwdev, info, section_info, - section, content, mssc_len); + content, mssc_len); if (ret) return -EINVAL; } else { @@ -337,6 +363,14 @@ static int __parse_security_section(struct rtw89_dev *rtwdev, if (info->dsp_checksum) *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; + if (sec->secure_boot) { + if (sec->mss_idx >= section_info->mssc) + return -EFAULT; + section_info->key_addr = content + section_info->len + + sec->mss_idx * FWDL_SECURITY_SIGLEN; + section_info->key_len = FWDL_SECURITY_SIGLEN; + } + info->secure_section_exist = true; } @@ -361,6 +395,7 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); base_hdr_len = struct_size(fw_hdr, sections, info->section_num); info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); + info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE); if (info->dynamic_hdr_en) { info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); @@ -394,8 +429,11 @@ static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 le section_info->addr = bin; if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { + section_info->mssc = + le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); + ret = __parse_security_section(rtwdev, info, section_info, - section, bin, &mssc_len); + bin, &mssc_len); if (ret) return ret; } else { @@ -1155,9 +1193,24 @@ static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev, struct rtw89_fw_bin_info *info, struct rtw89_fw_hdr *fw_hdr) { + struct rtw89_fw_hdr_section_info *section_info; + struct rtw89_fw_hdr_section *section; + int i; + le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, FW_HDR_W7_PART_SIZE); + for (i = 0; i < info->section_num; i++) { + section_info = &info->section_info[i]; + + if (!section_info->len_override) + continue; + + section = &fw_hdr->sections[i]; + le32p_replace_bits(§ion->w1, section_info->len_override, + FWSECTION_HDR_W1_SEC_SIZE); + } + return 0; } @@ -1286,10 +1339,20 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, if (info->ignore) return 0; + if (info->len_override) { + if (info->len_override > info->len) + rtw89_warn(rtwdev, "override length %u larger than original %u\n", + info->len_override, info->len); + else + residue_len = info->len_override; + } + if (info->key_addr && info->key_len) { - 
if (info->len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) - rtw89_warn(rtwdev, "ignore to copy key data because of len %d, %d, %d\n", - info->len, FWDL_SECTION_PER_PKT_LEN, info->key_len); + if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) + rtw89_warn(rtwdev, + "ignore to copy key data because of len %d, %d, %d, %d\n", + info->len, FWDL_SECTION_PER_PKT_LEN, + info->key_len, residue_len); else copy_key = true; } @@ -1425,6 +1488,8 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, return ret; } + rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode); + if (rtwdev->chip->chip_id == RTL8922A && (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); @@ -2508,7 +2573,7 @@ int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_PS, - H2C_FUNC_MAC_LPS_PARM, 0, 1, + H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode, H2C_LPS_PARM_LEN); ret = rtw89_h2c_tx(rtwdev, skb, false); @@ -3795,6 +3860,52 @@ fail: return ret; } +int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) +{ + struct rtw89_h2c_tx_duty *h2c; + u32 len = sizeof(*h2c); + struct sk_buff *skb; + u16 pause, active; + int ret; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); + return -ENOMEM; + } + + skb_put(skb, len); + h2c = (struct rtw89_h2c_tx_duty *)skb->data; + + static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); + + if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { + h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); + } else { + active = 100 - lv * RTW89_THERMAL_PROT_STEP; + pause = 100 - active; + + h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | + le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); + } + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, + H2C_FUNC_TX_DUTY, 0, 0, len); + + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return ret; +} + int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, bool connect) @@ -4965,7 +5076,7 @@ int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, scan_mode = RTW89_SCAN_IMMEDIATE; } else { scan_mode = RTW89_SCAN_DELAY; - tsf += option->delay * RTW89_SCAN_DELAY_TSF_UNIT; + tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT; } } @@ -5260,7 +5371,7 @@ fail: int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) { - struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; + struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; struct rtw89_fw_h2c_rf_get_mccch *mccch; struct sk_buff *skb; int ret; @@ -5307,7 +5418,7 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; struct rtw89_fw_h2c_rfk_pre_info *h2c; - u8 tbl_sel = rfk_mcc->table_idx; + u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; u32 len = sizeof(*h2c); struct sk_buff *skb; u8 ver = U8_MAX; @@ -5331,19 +5442,24 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); + BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); for (tbl = 0; tbl 
< NUM_OF_RTW89_FW_RFK_TBL; tbl++) { for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { h2c->common.dbcc.ch[path][tbl] = - cpu_to_le32(rfk_mcc->ch[tbl]); + cpu_to_le32(rfk_mcc->data[path].ch[tbl]); h2c->common.dbcc.band[path][tbl] = - cpu_to_le32(rfk_mcc->band[tbl]); + cpu_to_le32(rfk_mcc->data[path].band[tbl]); } } for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { - h2c->common.tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]); - h2c->common.tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]); + tbl_sel[path] = rfk_mcc->data[path].table_idx; + + h2c->common.tbl.cur_ch[path] = + cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); + h2c->common.tbl.cur_band[path] = + cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); } h2c->common.phy_idx = cpu_to_le32(phy_idx); @@ -5351,9 +5467,9 @@ int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; - h2c_v0->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]); - h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]); - h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]); + h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); + h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); + h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); h2c_v0->ktbl_sel0 = cpu_to_le32(val32); @@ -6252,8 +6368,10 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); ch_info->dwell_time = RTW89_DWELL_TIME; + ch_info->pause_data = true; break; case RTW89_CHAN_ACTIVE: + ch_info->pause_data = true; break; default: rtw89_err(rtwdev, "Channel type out of bound\n"); @@ -6352,8 +6470,10 @@ static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); ch_info->dwell_time = RTW89_DWELL_TIME; + ch_info->pause_data = true; break; case RTW89_CHAN_ACTIVE: + ch_info->pause_data = true; break; default: rtw89_warn(rtwdev, "Channel type out of bound\n"); @@ -6675,6 +6795,8 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, if (!rtwvif_link) return; + rtw89_chanctx_proceed(rtwdev); + rtwvif = rtwvif_link->rtwvif; reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); @@ -6692,8 +6814,6 @@ void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, scan_info->last_chan_idx = 0; scan_info->scanning_vif = NULL; scan_info->abort = false; - - rtw89_chanctx_proceed(rtwdev); } void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index d338c3aec725..efa63d444821 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -261,6 +261,7 @@ struct rtw89_fw_hdr_section_info { u8 redl; const u8 *addr; u32 len; + u32 len_override; u32 dladdr; u32 mssc; u8 type; @@ -275,6 +276,7 @@ struct rtw89_fw_bin_info { u32 hdr_len; bool dynamic_hdr_en; u32 dynamic_hdr_len; + u8 idmem_share_mode; bool dsp_checksum; bool secure_section_exist; struct rtw89_fw_hdr_section_info section_info[FWDL_SECTION_MAX_NUM]; @@ -563,6 +565,7 @@ struct rtw89_fw_hdr { #define FW_HDR_W6_SEC_NUM GENMASK(15, 8) #define FW_HDR_W7_PART_SIZE GENMASK(15, 0) #define FW_HDR_W7_DYN_HDR BIT(16) +#define FW_HDR_W7_IDMEM_SHARE_MODE GENMASK(21, 18) #define FW_HDR_W7_CMD_VERSERION GENMASK(31, 24) 
struct rtw89_fw_hdr_section_v1 { @@ -580,6 +583,7 @@ struct rtw89_fw_hdr_section_v1 { #define FWSECTION_HDR_V1_W1_REDL BIT(29) #define FWSECTION_HDR_V1_W2_MSSC GENMASK(7, 0) #define FORMATTED_MSSC 0xFF +#define FORMATTED_MSSC_MASK GENMASK(7, 0) #define FWSECTION_HDR_V1_W2_BBMCU_IDX GENMASK(27, 24) struct rtw89_fw_hdr_v1 { @@ -615,6 +619,7 @@ struct rtw89_fw_hdr_v1 { #define FW_HDR_V1_W6_DSP_CHKSUM BIT(24) #define FW_HDR_V1_W7_PART_SIZE GENMASK(15, 0) #define FW_HDR_V1_W7_DYN_HDR BIT(16) +#define FW_HDR_V1_W7_IDMEM_SHARE_MODE GENMASK(21, 18) enum rtw89_fw_mss_pool_rmp_tbl_type { MSS_POOL_RMP_TBL_BITMASK = 0x0, @@ -3689,6 +3694,13 @@ struct rtw89_c2h_pkt_ofld_rsp { #define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_OP GENMASK(10, 8) #define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_LEN GENMASK(31, 16) +struct rtw89_c2h_tx_duty_rpt { + struct rtw89_c2h_hdr c2h_hdr; + __le32 w2; +} __packed; + +#define RTW89_C2H_TX_DUTY_RPT_W2_TIMER_ERR GENMASK(2, 0) + struct rtw89_c2h_wow_aoac_report { struct rtw89_c2h_hdr c2h_hdr; u8 rpt_ver; @@ -3713,6 +3725,15 @@ struct rtw89_c2h_wow_aoac_report { #define RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX BIT(0) +struct rtw89_h2c_tx_duty { + __le32 w0; + __le32 w1; +} __packed; + +#define RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK GENMASK(15, 0) +#define RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK GENMASK(31, 16) +#define RTW89_H2C_TX_DUTY_W1_STOP BIT(0) + struct rtw89_h2c_bcnfltr { __le32 w0; } __packed; @@ -4071,6 +4092,7 @@ enum rtw89_fw_ofld_h2c_func { H2C_FUNC_OFLD_CFG = 0x14, H2C_FUNC_ADD_SCANOFLD_CH = 0x16, H2C_FUNC_SCANOFLD = 0x17, + H2C_FUNC_TX_DUTY = 0x18, H2C_FUNC_PKT_DROP = 0x1b, H2C_FUNC_CFG_BCNFLTR = 0x1e, H2C_FUNC_OFLD_RSSI = 0x1f, @@ -4506,6 +4528,7 @@ int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, u8 ac, u32 val); int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev); +int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv); int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, bool connect); diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index e09f926bb4a0..7907b84d204b 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1444,6 +1444,7 @@ void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev) static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) { #define PWR_ACT 1 + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; const struct rtw89_chip_info *chip = rtwdev->chip; const struct rtw89_pwr_cfg * const *cfg_seq; int (*cfg_func)(struct rtw89_dev *rtwdev); @@ -1472,6 +1473,9 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) return ret; if (on) { + if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags)) + mac->efuse_read_fw_secure(rtwdev); + set_bit(RTW89_FLAG_POWERON, rtwdev->flags); set_bit(RTW89_FLAG_DMAC_FUNC, rtwdev->flags); set_bit(RTW89_FLAG_CMAC0_FUNC, rtwdev->flags); @@ -3996,9 +4000,10 @@ fail: static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) { + struct rtw89_fw_secure *sec = &rtwdev->fw.sec; u8 i; - if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX || sec->secure_boot) return; for (i = 0; i < 4; i++) { @@ -4010,7 +4015,9 @@ static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) { - if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) + struct 
rtw89_fw_secure *sec = &rtwdev->fw.sec; + + if (rtwdev->chip->chip_gen != RTW89_CHIP_AX || sec->secure_boot) return; rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, @@ -5029,6 +5036,18 @@ rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, } static void +rtw89_mac_c2h_tx_duty_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 len) +{ + struct rtw89_c2h_tx_duty_rpt *c2h = + (struct rtw89_c2h_tx_duty_rpt *)skb_c2h->data; + u8 err; + + err = le32_get_bits(c2h->w2, RTW89_C2H_TX_DUTY_RPT_W2_TIMER_ERR); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "C2H TX duty rpt with err=%d\n", err); +} + +static void rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) { @@ -5354,6 +5373,7 @@ void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev, [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL, [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause, [RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp, + [RTW89_MAC_C2H_FUNC_TX_DUTY_RPT] = rtw89_mac_c2h_tx_duty_rpt, [RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT] = rtw89_mac_c2h_tsf32_toggle_rpt, [RTW89_MAC_C2H_FUNC_BCNFLTR_RPT] = rtw89_mac_c2h_bcn_fltr_rpt, }; @@ -6604,6 +6624,20 @@ int rtw89_fwdl_check_path_ready_ax(struct rtw89_dev *rtwdev, rtwdev, R_AX_WCPU_FW_CTRL); } +static +void rtw89_fwdl_secure_idmem_share_mode_ax(struct rtw89_dev *rtwdev, u8 mode) +{ + struct rtw89_fw_secure *sec = &rtwdev->fw.sec; + + if (!sec->secure_boot) + return; + + rtw89_write32_mask(rtwdev, R_AX_WCPU_FW_CTRL, + B_AX_IDMEM_SHARE_MODE_RECORD_MASK, mode); + rtw89_write32_set(rtwdev, R_AX_WCPU_FW_CTRL, + B_AX_IDMEM_SHARE_MODE_RECORD_VALID); +} + const struct rtw89_mac_gen_def rtw89_mac_gen_ax = { .band1_offset = RTW89_MAC_AX_BAND_REG_OFFSET, .filter_model_addr = R_AX_FILTER_MODEL_ADDR, @@ -6657,9 +6691,11 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = { .fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax, .fwdl_get_status = rtw89_fw_get_rdy_ax, .fwdl_check_path_ready = rtw89_fwdl_check_path_ready_ax, + .fwdl_secure_idmem_share_mode = rtw89_fwdl_secure_idmem_share_mode_ax, .parse_efuse_map = rtw89_parse_efuse_map_ax, .parse_phycap_map = rtw89_parse_phycap_map_ax, .cnv_efuse_state = rtw89_cnv_efuse_state_ax, + .efuse_read_fw_secure = rtw89_efuse_read_fw_secure_ax, .cfg_plt = rtw89_mac_cfg_plt_ax, .get_plt_cnt = rtw89_mac_get_plt_cnt_ax, diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index e59c1fcfea46..18579c020548 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -391,6 +391,7 @@ enum rtw89_mac_c2h_ofld_func { RTW89_MAC_C2H_FUNC_MACID_PAUSE, RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT = 0x6, RTW89_MAC_C2H_FUNC_SCANOFLD_RSP = 0x9, + RTW89_MAC_C2H_FUNC_TX_DUTY_RPT = 0xa, RTW89_MAC_C2H_FUNC_BCNFLTR_RPT = 0xd, RTW89_MAC_C2H_FUNC_OFLD_MAX, }; @@ -984,9 +985,11 @@ struct rtw89_mac_gen_def { bool dlfw, bool include_bb); u8 (*fwdl_get_status)(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type); int (*fwdl_check_path_ready)(struct rtw89_dev *rtwdev, bool h2c_or_fwdl); + void (*fwdl_secure_idmem_share_mode)(struct rtw89_dev *rtwdev, u8 mode); int (*parse_efuse_map)(struct rtw89_dev *rtwdev); int (*parse_phycap_map)(struct rtw89_dev *rtwdev); int (*cnv_efuse_state)(struct rtw89_dev *rtwdev, bool idle); + int (*efuse_read_fw_secure)(struct rtw89_dev *rtwdev); int (*cfg_plt)(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt); u16 (*get_plt_cnt)(struct rtw89_dev *rtwdev, u8 band); @@ -1493,4 +1496,14 @@ int 
rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_rsvd_qt_cfg *cfg); int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable); +static inline +void rtw89_fwdl_secure_idmem_share_mode(struct rtw89_dev *rtwdev, u8 mode) +{ + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + + if (!mac->fwdl_secure_idmem_share_mode) + return; + + return mac->fwdl_secure_idmem_share_mode(rtwdev, mode); +} #endif diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 3f33c3a2ae7d..619d2d3771d5 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -192,6 +192,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list); + INIT_LIST_HEAD(&rtwvif->mgnt_entry); + ether_addr_copy(rtwvif->mac_addr, vif->addr); rtwvif->offchan = false; @@ -1375,6 +1377,7 @@ static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw, rtwvif_link = rtwvif->links[link_conf->link_id]; if (unlikely(!rtwvif_link)) { + mutex_unlock(&rtwdev->mutex); rtw89_err(rtwdev, "%s: rtwvif link (link_id %u) is not active\n", __func__, link_conf->link_id); diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c index 30943462640f..f7a396c8a3cd 100644 --- a/drivers/net/wireless/realtek/rtw89/mac_be.c +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -2600,9 +2600,11 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = { .fwdl_enable_wcpu = rtw89_mac_fwdl_enable_wcpu_be, .fwdl_get_status = fwdl_get_status_be, .fwdl_check_path_ready = rtw89_fwdl_check_path_ready_be, + .fwdl_secure_idmem_share_mode = NULL, .parse_efuse_map = rtw89_parse_efuse_map_be, .parse_phycap_map = rtw89_parse_phycap_map_be, .cnv_efuse_state = rtw89_cnv_efuse_state_be, + .efuse_read_fw_secure = rtw89_efuse_read_fw_secure_be, .cfg_plt = rtw89_mac_cfg_plt_be, .get_plt_cnt = rtw89_mac_get_plt_cnt_be, diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index fc17f9ff757c..f923bec03d41 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -2671,9 +2671,10 @@ static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev) static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev) { const struct rtw89_pci_info *info = rtwdev->pci_info; - u32 ret, check, dma_busy; u32 dma_busy1 = info->dma_busy1.addr; u32 dma_busy2 = info->dma_busy2_reg; + u32 check, dma_busy; + int ret; check = info->dma_busy1.mask; @@ -2698,8 +2699,9 @@ static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev) static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev) { const struct rtw89_pci_info *info = rtwdev->pci_info; - u32 ret, check, dma_busy; u32 dma_busy3 = info->dma_busy3_reg; + u32 check, dma_busy; + int ret; check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; @@ -4209,6 +4211,36 @@ static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) return work_done; } +static +void rtw89_check_pci_ssid_quirks(struct rtw89_dev *rtwdev, + struct pci_dev *pdev, + const struct rtw89_pci_ssid_quirk *ssid_quirks) +{ + int i; + + if (!ssid_quirks) + return; + + for (i = 0; i < 200; i++, ssid_quirks++) { + if (ssid_quirks->vendor == 0 && ssid_quirks->device == 0) + break; + + if (ssid_quirks->vendor != pdev->vendor || + ssid_quirks->device != pdev->device || + ssid_quirks->subsystem_vendor != 
pdev->subsystem_vendor || + ssid_quirks->subsystem_device != pdev->subsystem_device) + continue; + + bitmap_or(rtwdev->quirks, rtwdev->quirks, &ssid_quirks->bitmap, + NUM_OF_RTW89_QUIRKS); + rtwdev->custid = ssid_quirks->custid; + break; + } + + rtw89_debug(rtwdev, RTW89_DBG_HCI, "quirks=%*ph custid=%d\n", + (int)sizeof(rtwdev->quirks), rtwdev->quirks, rtwdev->custid); +} + static int __maybe_unused rtw89_pci_suspend(struct device *dev) { struct ieee80211_hw *hw = dev_get_drvdata(dev); @@ -4383,6 +4415,7 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; rtw89_check_quirks(rtwdev, info->quirks); + rtw89_check_pci_ssid_quirks(rtwdev, pdev, pci_info->ssid_quirks); SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 796f6cd3c965..b68e2d82eea9 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -1292,6 +1292,19 @@ struct rtw89_pci_gen_def { void (*disable_eq)(struct rtw89_dev *rtwdev); }; +#define RTW89_PCI_SSID(v, d, ssv, ssd, cust) \ + .vendor = v, .device = d, .subsystem_vendor = ssv, .subsystem_device = ssd, \ + .custid = RTW89_CUSTID_ ##cust + +struct rtw89_pci_ssid_quirk { + unsigned short vendor; + unsigned short device; + unsigned short subsystem_vendor; + unsigned short subsystem_device; + enum rtw89_custid custid; + unsigned long bitmap; /* bitmap of rtw89_quirks */ +}; + struct rtw89_pci_info { const struct rtw89_pci_gen_def *gen_def; enum mac_ax_bd_trunc_mode txbd_trunc_mode; @@ -1345,6 +1358,8 @@ struct rtw89_pci_info { void (*recognize_intrs)(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci, struct rtw89_pci_isrs *isrs); + + const struct rtw89_pci_ssid_quirk *ssid_quirks; }; struct rtw89_pci_tx_data { diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index 5a08e3d46bac..f24aca663cf0 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -4836,11 +4836,36 @@ static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev) rtw89_phy_antdiv_reg_init(rtwdev); } +static void rtw89_phy_thermal_protect(struct rtw89_dev *rtwdev) +{ + struct rtw89_phy_stat *phystat = &rtwdev->phystat; + struct rtw89_hal *hal = &rtwdev->hal; + u8 th_max = phystat->last_thermal_max; + u8 lv = hal->thermal_prot_lv; + + if (!hal->thermal_prot_th || + (hal->disabled_dm_bitmap & BIT(RTW89_DM_THERMAL_PROTECT))) + return; + + if (th_max > hal->thermal_prot_th && lv < RTW89_THERMAL_PROT_LV_MAX) + lv++; + else if (th_max < hal->thermal_prot_th - 2 && lv > 0) + lv--; + else + return; + + hal->thermal_prot_lv = lv; + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "thermal protection lv=%d\n", lv); + + rtw89_fw_h2c_tx_duty(rtwdev, hal->thermal_prot_lv); +} + static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev) { struct rtw89_phy_stat *phystat = &rtwdev->phystat; + u8 th, th_max = 0; int i; - u8 th; for (i = 0; i < rtwdev->chip->rf_path_num; i++) { th = rtw89_chip_get_thermal(rtwdev, i); @@ -4850,7 +4875,11 @@ static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev) rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "path(%d) thermal cur=%u avg=%ld", i, th, ewma_thermal_read(&phystat->avg_thermal[i])); + + th_max = max(th_max, th); } + + phystat->last_thermal_max = th_max; } struct rtw89_phy_iter_rssi_data { @@ -4923,6 +4952,8 @@ static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev) 
memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat)); ewma_rssi_init(&phystat->bcn_rssi); + + rtwdev->hal.thermal_prot_lv = 0; } void rtw89_phy_stat_track(struct rtw89_dev *rtwdev) @@ -4930,6 +4961,7 @@ void rtw89_phy_stat_track(struct rtw89_dev *rtwdev) struct rtw89_phy_stat *phystat = &rtwdev->phystat; rtw89_phy_stat_thermal_update(rtwdev); + rtw89_phy_thermal_protect(rtwdev); rtw89_phy_stat_rssi_update(rtwdev); phystat->last_pkt_stat = phystat->cur_pkt_stat; diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 69678eab2309..18ec7c0252fb 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -196,6 +196,8 @@ #define R_AX_HALT_C2H 0x016C #define R_AX_WCPU_FW_CTRL 0x01E0 +#define B_AX_IDMEM_SHARE_MODE_RECORD_MASK GENMASK(27, 24) +#define B_AX_IDMEM_SHARE_MODE_RECORD_VALID BIT(23) #define B_AX_WCPU_FWDL_STS_MASK GENMASK(7, 5) #define B_AX_FWDL_PATH_RDY BIT(2) #define B_AX_H2C_PATH_RDY BIT(1) diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index bb064a086970..cad5189708e7 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -646,22 +646,44 @@ static void rtw89_regd_apply_policy_unii4(struct rtw89_dev *rtwdev, sband->channels[i].flags |= IEEE80211_CHAN_DISABLED; } -static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev, - struct wiphy *wiphy) +static bool regd_is_6ghz_blocked(struct rtw89_dev *rtwdev) { struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; const struct rtw89_regd *regd = regulatory->regd; - struct ieee80211_supported_band *sband; u8 index; - int i; index = rtw89_regd_get_index(regd); if (index != RTW89_REGD_MAX_COUNTRY_NUM && !test_bit(index, regulatory->block_6ghz)) - return; + return false; rtw89_debug(rtwdev, RTW89_DBG_REGD, "%c%c 6 GHz is blocked by policy\n", regd->alpha2[0], regd->alpha2[1]); + return true; +} + +static bool regd_is_6ghz_not_applicable(struct rtw89_dev *rtwdev) +{ + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_regd *regd = regulatory->regd; + + if (regd->txpwr_regd[RTW89_BAND_6G] != RTW89_NA) + return false; + + rtw89_debug(rtwdev, RTW89_DBG_REGD, "%c%c 6 GHz is N/A in regd map\n", + regd->alpha2[0], regd->alpha2[1]); + return true; +} + +static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev, + struct wiphy *wiphy) +{ + struct ieee80211_supported_band *sband; + int i; + + if (!regd_is_6ghz_blocked(rtwdev) && + !regd_is_6ghz_not_applicable(rtwdev)) + return; sband = wiphy->bands[NL80211_BAND_6GHZ]; if (!sband) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index f9766bf30e71..68c67a763f4d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -282,7 +282,7 @@ static int rtw8851b_pwr_on_func(struct rtw89_dev *rtwdev) { u32 val32; u8 val8; - u32 ret; + int ret; rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN | B_AX_AFSM_PCIE_SUS_EN); @@ -401,7 +401,7 @@ static void rtw8851b_patch_swr_pfm2pwm(struct rtw89_dev *rtwdev) static int rtw8851b_pwr_off_func(struct rtw89_dev *rtwdev) { u32 val32; - u32 ret; + int ret; ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF, XTAL_SI_RFC2RF); @@ -2454,6 +2454,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .wde_qempty_acq_grpnum = 4, .wde_qempty_mgq_grpsel = 4, .rf_base_addr = 
{0xe000}, + .thermal_th = {0x32, 0x35}, .pwr_on_seq = NULL, .pwr_off_seq = NULL, .bb_table = &rtw89_8851b_phy_bb_table, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c index d334924faec8..651cbce1dd7e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c @@ -60,6 +60,8 @@ static const struct rtw89_pci_info rtw8851b_pci_info = { .enable_intr = rtw89_pci_enable_intr, .disable_intr = rtw89_pci_disable_intr, .recognize_intrs = rtw89_pci_recognize_intrs, + + .ssid_quirks = NULL, }; static const struct rtw89_driver_info rtw89_8851be_info = { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 42d369d2e916..e647759ebd69 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -2170,6 +2170,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .wde_qempty_acq_grpnum = 16, .wde_qempty_mgq_grpsel = 16, .rf_base_addr = {0xc000, 0xd000}, + .thermal_th = {0x32, 0x35}, .pwr_on_seq = pwr_on_seq_8852a, .pwr_off_seq = pwr_off_seq_8852a, .bb_table = &rtw89_8852a_phy_bb_table, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index 9a675e2193bc..701187d69e14 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -58,6 +58,8 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { .enable_intr = rtw89_pci_enable_intr, .disable_intr = rtw89_pci_disable_intr, .recognize_intrs = rtw89_pci_recognize_intrs, + + .ssid_quirks = NULL, }; static const struct rtw89_driver_info rtw89_8852ae_info = { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index 364aa21cbd44..49a319128316 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -254,7 +254,7 @@ static void rtw8852b_pwr_sps_ana(struct rtw89_dev *rtwdev) static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev) { u32 val32; - u32 ret; + int ret; rtw8852b_pwr_sps_ana(rtwdev); @@ -383,7 +383,7 @@ func_en: static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev) { u32 val32; - u32 ret; + int ret; rtw8852b_pwr_sps_ana(rtwdev); @@ -808,6 +808,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .wde_qempty_acq_grpnum = 4, .wde_qempty_mgq_grpsel = 4, .rf_base_addr = {0xe000, 0xf000}, + .thermal_th = {0x32, 0x35}, .pwr_on_seq = NULL, .pwr_off_seq = NULL, .bb_table = &rtw89_8852b_phy_bb_table, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c index ede0ca5426ae..f4aa4437fb75 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c @@ -905,7 +905,6 @@ static void rtw8852bx_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw, { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 rx_path_0; - u32 val; rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, phy_idx); @@ -985,12 +984,11 @@ static void rtw8852bx_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw, rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, pri_ch, phy_idx); - /*Set RF mode at A */ - val = chip_id == RTL8852BT ? 
0x333 : 0xaaa; + /*Set RF mode at 3 */ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, val, phy_idx); + B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, val, phy_idx); + B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); break; default: rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c index d8f9d92ca0fb..a13ea1cce4a7 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c @@ -60,6 +60,8 @@ static const struct rtw89_pci_info rtw8852b_pci_info = { .enable_intr = rtw89_pci_enable_intr, .disable_intr = rtw89_pci_disable_intr, .recognize_intrs = rtw89_pci_recognize_intrs, + + .ssid_quirks = NULL, }; static const struct rtw89_driver_info rtw89_8852be_info = { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c index dab7e71ec6a1..876725133228 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c @@ -244,7 +244,7 @@ static const u8 rtw89_btc_8852bt_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, static int rtw8852bt_pwr_on_func(struct rtw89_dev *rtwdev) { u32 val32; - u32 ret; + int ret; rtw89_write32_set(rtwdev, R_AX_LDO_AON_CTRL0, B_AX_PD_REGU_L); rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN | @@ -357,7 +357,7 @@ func_en: static int rtw8852bt_pwr_off_func(struct rtw89_dev *rtwdev) { u32 val32; - u32 ret; + int ret; ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF, XTAL_SI_RFC2RF); @@ -742,6 +742,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = { .wde_qempty_acq_grpnum = 4, .wde_qempty_mgq_grpsel = 4, .rf_base_addr = {0xe000, 0xf000}, + .thermal_th = {0x32, 0x35}, .pwr_on_seq = NULL, .pwr_off_seq = NULL, .bb_table = NULL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c index 702948119646..e4f40c2e287d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bte.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bte.c @@ -60,6 +60,8 @@ static const struct rtw89_pci_info rtw8852bt_pci_info = { .enable_intr = rtw89_pci_enable_intr, .disable_intr = rtw89_pci_disable_intr, .recognize_intrs = rtw89_pci_recognize_intrs, + + .ssid_quirks = NULL, }; static const struct rtw89_driver_info rtw89_8852bte_info = { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index dbe77abb2c48..cde34f8e1e67 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -203,7 +203,7 @@ static void rtw8852c_ctrl_tx_path_tmac(struct rtw89_dev *rtwdev, u8 tx_path, static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev) { u32 val32; - u32 ret; + int ret; val32 = rtw89_read32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_PAD_HCI_SEL_V2_MASK); if (val32 == MAC_AX_HCI_SEL_PCIE_USB) @@ -324,7 +324,7 @@ static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev) static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev) { u32 val32; - u32 ret; + int ret; ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF, XTAL_SI_RFC2RF); @@ -2947,6 +2947,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .wde_qempty_acq_grpnum = 16, .wde_qempty_mgq_grpsel = 16, .rf_base_addr = {0xe000, 0xf000}, + .thermal_th = {0x32, 0x35}, .pwr_on_seq = 
NULL, .pwr_off_seq = NULL, .bb_table = &rtw89_8852c_phy_bb_table, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 3281ee9d7523..bd17c0a1c684 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -1065,7 +1065,7 @@ static bool _iqk_nbtxk(struct rtw89_dev *rtwdev, static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path) { - struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; + struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; u8 idx = rfk_mcc->table_idx; bool is_fail1, is_fail2; @@ -1408,7 +1408,7 @@ static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) { - struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; + struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; u8 idx = 0; idx = rfk_mcc->table_idx; @@ -4105,7 +4105,7 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { - struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; + struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {}; const struct rtw89_chan *chan; enum rtw89_entity_mode mode; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index 8aaad7d58c0d..1a46878be96b 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -67,6 +67,8 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .enable_intr = rtw89_pci_enable_intr_v1, .disable_intr = rtw89_pci_disable_intr_v1, .recognize_intrs = rtw89_pci_recognize_intrs_v1, + + .ssid_quirks = NULL, }; static const struct dmi_system_id rtw8852c_pci_quirks[] = { diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c index 58c9721ac3ab..9a4db04a1967 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c @@ -2,6 +2,7 @@ /* Copyright(c) 2023 Realtek Corporation */ +#include "chan.h" #include "coex.h" #include "debug.h" #include "efuse.h" @@ -398,9 +399,6 @@ static int rtw8922a_pwr_on_func(struct rtw89_dev *rtwdev) rtw89_write32_set(rtwdev, R_BE_FEN_RST_ENABLE, B_BE_FEN_BB_IP_RSTN | B_BE_FEN_BBPLAT_RSTB); - if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags)) - rtw89_efuse_read_fw_secure_be(rtwdev); - return 0; } @@ -965,6 +963,42 @@ static const struct rtw8922a_bb_gain bb_gain_tia[TIA_GAIN_NUM] = { .gain_g_mask = 0x1FF, .gain_a_mask = 0x3FE00 }, }; +static const struct rtw8922a_bb_gain bb_op1db_lna[LNA_GAIN_NUM] = { + { .gain_g = {0x40ac, 0x44ac}, .gain_a = {0x4078, 0x4478}, + .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF000000}, + { .gain_g = {0x40ac, 0x44ac}, .gain_a = {0x407c, 0x447c}, + .gain_g_mask = 0xFF0000, .gain_a_mask = 0xFF}, + { .gain_g = {0x40ac, 0x44ac}, .gain_a = {0x407c, 0x447c}, + .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF00}, + { .gain_g = {0x40b0, 0x44b0}, .gain_a = {0x407c, 0x447c}, + .gain_g_mask = 0xFF, .gain_a_mask = 0xFF0000}, + { .gain_g = {0x40b0, 0x44b0}, .gain_a = {0x407c, 0x447c}, + .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF000000}, + { .gain_g = {0x40b0, 0x44b0}, .gain_a = {0x4080, 0x4480}, + .gain_g_mask = 0xFF0000, .gain_a_mask = 0xFF}, + { .gain_g = {0x40b0, 0x44b0}, .gain_a = {0x4080, 
0x4480}, + .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF00}, +}; + +static const struct rtw8922a_bb_gain bb_op1db_tia_lna[TIA_LNA_OP1DB_NUM] = { + { .gain_g = {0x40b4, 0x44b4}, .gain_a = {0x4080, 0x4480}, + .gain_g_mask = 0xFF0000, .gain_a_mask = 0xFF000000}, + { .gain_g = {0x40b4, 0x44b4}, .gain_a = {0x4084, 0x4484}, + .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF}, + { .gain_g = {0x40b8, 0x44b8}, .gain_a = {0x4084, 0x4484}, + .gain_g_mask = 0xFF, .gain_a_mask = 0xFF00}, + { .gain_g = {0x40b8, 0x44b8}, .gain_a = {0x4084, 0x4484}, + .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF0000}, + { .gain_g = {0x40b8, 0x44b8}, .gain_a = {0x4084, 0x4484}, + .gain_g_mask = 0xFF0000, .gain_a_mask = 0xFF000000}, + { .gain_g = {0x40b8, 0x44b8}, .gain_a = {0x4088, 0x4488}, + .gain_g_mask = 0xFF000000, .gain_a_mask = 0xFF}, + { .gain_g = {0x40bc, 0x44bc}, .gain_a = {0x4088, 0x4488}, + .gain_g_mask = 0xFF, .gain_a_mask = 0xFF00}, + { .gain_g = {0x40bc, 0x44bc}, .gain_a = {0x4088, 0x4488}, + .gain_g_mask = 0xFF00, .gain_a_mask = 0xFF0000}, +}; + struct rtw8922a_bb_gain_bypass { u32 gain_g[BB_PATH_NUM_8922A]; u32 gain_a[BB_PATH_NUM_8922A]; @@ -1056,6 +1090,30 @@ static void rtw8922a_set_lna_tia_gain(struct rtw89_dev *rtwdev, val = gain->tia_gain[gain_band][bw_type][path][i]; rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx); } + + for (i = 0; i < LNA_GAIN_NUM; i++) { + if (chan->band_type == RTW89_BAND_2G) { + reg = bb_op1db_lna[i].gain_g[path]; + mask = bb_op1db_lna[i].gain_g_mask; + } else { + reg = bb_op1db_lna[i].gain_a[path]; + mask = bb_op1db_lna[i].gain_a_mask; + } + val = gain->lna_op1db[gain_band][bw_type][path][i]; + rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx); + } + + for (i = 0; i < TIA_LNA_OP1DB_NUM; i++) { + if (chan->band_type == RTW89_BAND_2G) { + reg = bb_op1db_tia_lna[i].gain_g[path]; + mask = bb_op1db_tia_lna[i].gain_g_mask; + } else { + reg = bb_op1db_tia_lna[i].gain_a[path]; + mask = bb_op1db_tia_lna[i].gain_a_mask; + } + val = gain->tia_lna_op1db[gain_band][bw_type][path][i]; + rtw89_phy_write32_idx(rtwdev, reg, mask, val, phy_idx); + } } static void rtw8922a_set_gain(struct rtw89_dev *rtwdev, @@ -1745,7 +1803,7 @@ static void rtw8922a_digital_pwr_comp(struct rtw89_dev *rtwdev, static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode mode) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); + const struct rtw89_chan *chan0, *chan1; if (mode == MLO_1_PLUS_1_1RF || mode == DBCC_LEGACY) { rtw89_phy_write32_mask(rtwdev, R_DBCC, B_DBCC_EN, 0x1); @@ -1758,13 +1816,20 @@ static int rtw8922a_ctrl_mlo(struct rtw89_dev *rtwdev, enum rtw89_mlo_dbcc_mode return -EOPNOTSUPP; } - if (mode == MLO_2_PLUS_0_1RF) { - rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_A); - rtw8922a_ctrl_afe_dac(rtwdev, chan->band_width, RF_PATH_B); + if (mode == MLO_1_PLUS_1_1RF) { + chan0 = rtw89_mgnt_chan_get(rtwdev, 0); + chan1 = rtw89_mgnt_chan_get(rtwdev, 1); + } else if (mode == MLO_0_PLUS_2_1RF) { + chan1 = rtw89_mgnt_chan_get(rtwdev, 1); + chan0 = chan1; } else { - rtw89_warn(rtwdev, "unsupported MLO mode %d\n", mode); + chan0 = rtw89_mgnt_chan_get(rtwdev, 0); + chan1 = chan0; } + rtw8922a_ctrl_afe_dac(rtwdev, chan0->band_width, RF_PATH_A); + rtw8922a_ctrl_afe_dac(rtwdev, chan1->band_width, RF_PATH_B); + rtw89_phy_write32_mask(rtwdev, R_EMLSR, B_EMLSR_PARM, 0x6180); if (mode == MLO_2_PLUS_0_1RF) { @@ -2218,10 +2283,12 @@ static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) static u8 rtw8922a_get_thermal(struct rtw89_dev *rtwdev, enum 
rtw89_rf_path rf_path) { struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + struct rtw89_hal *hal = &rtwdev->hal; int th; - /* read thermal only if debugging */ - if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_CFO | RTW89_DBG_RFK_TRACK)) + /* read thermal only if debugging or thermal protection enabled */ + if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_CFO | RTW89_DBG_RFK_TRACK) && + !hal->thermal_prot_th) return 80; rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); @@ -2652,6 +2719,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .wde_qempty_acq_grpnum = 4, .wde_qempty_mgq_grpsel = 4, .rf_base_addr = {0xe000, 0xf000}, + .thermal_th = {0xad, 0xb4}, .pwr_on_seq = NULL, .pwr_off_seq = NULL, .bb_table = NULL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c index 28907df7407d..c4c93f836a2f 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c @@ -252,49 +252,58 @@ static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx) } } -static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev) +static u8 rtw8922a_chlk_reload_sel_tbl(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, u8 path) { struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {}; - enum rtw89_chanctx_idx chanctx_idx; - const struct rtw89_chan *chan; - enum rtw89_entity_mode mode; - u8 s0_tbl, s1_tbl; u8 tbl_sel; - mode = rtw89_get_entity_mode(rtwdev); - switch (mode) { - case RTW89_ENTITY_MODE_MCC_PREPARE: - chanctx_idx = RTW89_CHANCTX_1; - break; - default: - chanctx_idx = RTW89_CHANCTX_0; - break; - } - - chan = rtw89_chan_get(rtwdev, chanctx_idx); - for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) { struct rtw89_rfk_chan_desc *p = &desc[tbl_sel]; - p->ch = rfk_mcc->ch[tbl_sel]; + p->ch = rfk_mcc->data[path].ch[tbl_sel]; p->has_band = true; - p->band = rfk_mcc->band[tbl_sel]; + p->band = rfk_mcc->data[path].band[tbl_sel]; p->has_bw = true; - p->bw = rfk_mcc->bw[tbl_sel]; + p->bw = rfk_mcc->data[path].bw[tbl_sel]; } tbl_sel = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan); - rfk_mcc->ch[tbl_sel] = chan->channel; - rfk_mcc->band[tbl_sel] = chan->band_type; - rfk_mcc->bw[tbl_sel] = chan->band_width; - rfk_mcc->table_idx = tbl_sel; + rfk_mcc->data[path].ch[tbl_sel] = chan->channel; + rfk_mcc->data[path].band[tbl_sel] = chan->band_type; + rfk_mcc->data[path].bw[tbl_sel] = chan->band_width; + rfk_mcc->data[path].table_idx = tbl_sel; + + return tbl_sel; +} + +static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chan *chan0, *chan1; + u8 s0_tbl, s1_tbl; + + switch (rtwdev->mlo_dbcc_mode) { + default: + case MLO_2_PLUS_0_1RF: + chan0 = rtw89_mgnt_chan_get(rtwdev, 0); + chan1 = chan0; + break; + case MLO_0_PLUS_2_1RF: + chan1 = rtw89_mgnt_chan_get(rtwdev, 1); + chan0 = chan1; + break; + case MLO_1_PLUS_1_1RF: + chan0 = rtw89_mgnt_chan_get(rtwdev, 0); + chan1 = rtw89_mgnt_chan_get(rtwdev, 1); + break; + } - s0_tbl = tbl_sel; - s1_tbl = tbl_sel; + s0_tbl = rtw8922a_chlk_reload_sel_tbl(rtwdev, chan0, 0); + s1_tbl = rtw8922a_chlk_reload_sel_tbl(rtwdev, chan1, 1); rtw8922a_chlk_ktbl_sel(rtwdev, RF_A, s0_tbl); rtw8922a_chlk_ktbl_sel(rtwdev, RF_B, s1_tbl); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c index 47f855a7a268..edfb1f220af0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c +++ 
b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c @@ -9,6 +9,12 @@ #include "reg.h" #include "rtw8922a.h" +static const struct rtw89_pci_ssid_quirk rtw8922a_pci_ssid_quirks[] = { + {RTW89_PCI_SSID(PCI_VENDOR_ID_REALTEK, 0x8922, 0x10EC, 0xA891, DELL), + .bitmap = BIT(RTW89_QUIRK_THERMAL_PROT_120C)}, + {}, +}; + static const struct rtw89_pci_info rtw8922a_pci_info = { .gen_def = &rtw89_pci_gen_be, .txbd_trunc_mode = MAC_AX_BD_TRUNC, @@ -58,6 +64,8 @@ static const struct rtw89_pci_info rtw8922a_pci_info = { .enable_intr = rtw89_pci_enable_intr_v2, .disable_intr = rtw89_pci_disable_intr_v2, .recognize_intrs = rtw89_pci_recognize_intrs_v2, + + .ssid_quirks = rtw8922a_pci_ssid_quirks, }; static const struct rtw89_driver_info rtw89_8922ae_info = { diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c index 27826d909785..bcc287771b2a 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.c +++ b/drivers/net/wireless/realtek/rtw89/sar.c @@ -27,8 +27,8 @@ static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev, return RTW89_SAR_5GHZ_SUBBAND_1_2; case 5500 ... 5720: return RTW89_SAR_5GHZ_SUBBAND_2_E; - case 5745 ... 5825: - return RTW89_SAR_5GHZ_SUBBAND_3; + case 5745 ... 5885: + return RTW89_SAR_5GHZ_SUBBAND_3_4; case 5955 ... 6155: return RTW89_SAR_6GHZ_SUBBAND_5_L; case 6175 ... 6415: @@ -295,7 +295,7 @@ static const struct cfg80211_sar_freq_ranges rtw89_common_sar_freq_ranges[] = { { .start_freq = 2412, .end_freq = 2484, }, { .start_freq = 5180, .end_freq = 5320, }, { .start_freq = 5500, .end_freq = 5720, }, - { .start_freq = 5745, .end_freq = 5825, }, + { .start_freq = 5745, .end_freq = 5885, }, { .start_freq = 5955, .end_freq = 6155, }, { .start_freq = 6175, .end_freq = 6415, }, { .start_freq = 6435, .end_freq = 6515, }, diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c index e7198520bdff..64441c8bc460 100644 --- a/drivers/net/wireless/silabs/wfx/main.c +++ b/drivers/net/wireless/silabs/wfx/main.c @@ -480,10 +480,23 @@ static int __init wfx_core_init(void) { int ret = 0; - if (IS_ENABLED(CONFIG_SPI)) + if (IS_ENABLED(CONFIG_SPI)) { ret = spi_register_driver(&wfx_spi_driver); - if (IS_ENABLED(CONFIG_MMC) && !ret) + if (ret) + goto out; + } + if (IS_ENABLED(CONFIG_MMC)) { ret = sdio_register_driver(&wfx_sdio_driver); + if (ret) + goto unregister_spi; + } + + return 0; + +unregister_spi: + if (IS_ENABLED(CONFIG_SPI)) + spi_unregister_driver(&wfx_spi_driver); +out: return ret; } module_init(wfx_core_init); diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c index 4f346fb977a9..862964a8cc87 100644 --- a/drivers/net/wireless/st/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c @@ -450,7 +450,7 @@ static int __maybe_unused cw1200_spi_suspend(struct device *dev) { struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev)); - if (!cw1200_can_suspend(self->core)) + if (self && !cw1200_can_suspend(self->core)) return -EAGAIN; /* XXX notify host that we have to keep CW1200 powered on? 
*/ diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c index b26d42b4e3cc..ffbf54776330 100644 --- a/drivers/net/wireless/ti/wl12xx/main.c +++ b/drivers/net/wireless/ti/wl12xx/main.c @@ -1939,7 +1939,7 @@ MODULE_DEVICE_TABLE(platform, wl12xx_id_table); static struct platform_driver wl12xx_driver = { .probe = wl12xx_probe, - .remove_new = wl12xx_remove, + .remove = wl12xx_remove, .id_table = wl12xx_id_table, .driver = { .name = "wl12xx_driver", diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 39d8eebb9b6e..4be1110bac88 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -2097,9 +2097,9 @@ MODULE_DEVICE_TABLE(platform, wl18xx_id_table); static struct platform_driver wl18xx_driver = { .probe = wl18xx_probe, - .remove_new = wlcore_remove, + .remove = wlcore_remove, .id_table = wl18xx_id_table, - .driver = { + .driver = { .name = "wl18xx_driver", } }; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index b149b638453f..855b42c92284 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3795,7 +3795,8 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { if (ns->head->ns_id == nsid) { if (!nvme_get_ns(ns)) continue; @@ -4879,7 +4880,8 @@ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mark_disk_dead(ns->disk); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4891,7 +4893,8 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mq_unfreeze_queue(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); @@ -4904,7 +4907,8 @@ int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); if (timeout <= 0) break; @@ -4920,7 +4924,8 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mq_freeze_queue_wait(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4933,7 +4938,8 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl) set_bit(NVME_CTRL_FROZEN, &ctrl->flags); srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_freeze_queue_start(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4981,7 +4987,8 @@ void nvme_sync_io_queues(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + 
list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_sync_queue(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 14b60abd6afc..01a8d0487918 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1379,6 +1379,8 @@ static const struct regulator_desc rk809_reg[] = { .n_linear_ranges = ARRAY_SIZE(rk817_buck1_voltage_ranges), .vsel_reg = RK817_BUCK3_ON_VSEL_REG, .vsel_mask = RK817_BUCK_VSEL_MASK, + .apply_reg = RK817_POWER_CONFIG, + .apply_bit = RK817_BUCK3_FB_RES_INTER, .enable_reg = RK817_POWER_EN_REG(0), .enable_mask = ENABLE_MASK(RK817_ID_DCDC3), .enable_val = ENABLE_MASK(RK817_ID_DCDC3), diff --git a/drivers/regulator/rtq2208-regulator.c b/drivers/regulator/rtq2208-regulator.c index a5c126afc648..5925fa7a9a06 100644 --- a/drivers/regulator/rtq2208-regulator.c +++ b/drivers/regulator/rtq2208-regulator.c @@ -568,7 +568,7 @@ static int rtq2208_probe(struct i2c_client *i2c) struct regmap *regmap; struct rtq2208_regulator_desc *rdesc[RTQ2208_LDO_MAX]; struct regulator_dev *rdev; - struct regulator_config cfg; + struct regulator_config cfg = {}; struct rtq2208_rdev_map *rdev_map; int i, ret = 0, idx, n_regulator = 0; unsigned int regulator_idx_table[RTQ2208_LDO_MAX], diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index ee2b74238758..6ab27f4f4878 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -188,8 +188,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); while (bufsize >= SECTOR_SIZE) { - buf = __vmalloc(bufsize, - GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY); + buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); if (buf) { *buflen = bufsize; return buf; diff --git a/drivers/staging/media/av7110/av7110.h b/drivers/staging/media/av7110/av7110.h index ec461fd187af..b584754f4be0 100644 --- a/drivers/staging/media/av7110/av7110.h +++ b/drivers/staging/media/av7110/av7110.h @@ -88,6 +88,8 @@ struct infrared { u32 ir_config; }; +#define MAX_CI_SLOTS 2 + /* place to store all the necessary device information */ struct av7110 { /* devices */ @@ -163,7 +165,7 @@ struct av7110 { /* CA */ - struct ca_slot_info ci_slot[2]; + struct ca_slot_info ci_slot[MAX_CI_SLOTS]; enum av7110_video_mode vidmode; struct dmxdev dmxdev; diff --git a/drivers/staging/media/av7110/av7110_ca.c b/drivers/staging/media/av7110/av7110_ca.c index 6ce212c64e5d..fce4023c9dea 100644 --- a/drivers/staging/media/av7110/av7110_ca.c +++ b/drivers/staging/media/av7110/av7110_ca.c @@ -26,23 +26,28 @@ void CI_handle(struct av7110 *av7110, u8 *data, u16 len) { + unsigned slot_num; + dprintk(8, "av7110:%p\n", av7110); if (len < 3) return; switch (data[0]) { case CI_MSG_CI_INFO: - if (data[2] != 1 && data[2] != 2) + if (data[2] != 1 && data[2] != MAX_CI_SLOTS) break; + + slot_num = array_index_nospec(data[2] - 1, MAX_CI_SLOTS); + switch (data[1]) { case 0: - av7110->ci_slot[data[2] - 1].flags = 0; + av7110->ci_slot[slot_num].flags = 0; break; case 1: - av7110->ci_slot[data[2] - 1].flags |= CA_CI_MODULE_PRESENT; + av7110->ci_slot[slot_num].flags |= CA_CI_MODULE_PRESENT; break; case 2: - av7110->ci_slot[data[2] - 1].flags |= CA_CI_MODULE_READY; + av7110->ci_slot[slot_num].flags |= CA_CI_MODULE_READY; break; } break; @@ -262,15 +267,19 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) case CA_GET_SLOT_INFO: { struct ca_slot_info 
*info = (struct ca_slot_info *)parg; + unsigned int slot_num; if (info->num < 0 || info->num > 1) { mutex_unlock(&av7110->ioctl_mutex); return -EINVAL; } - av7110->ci_slot[info->num].num = info->num; - av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ? - CA_CI_LINK : CA_CI; - memcpy(info, &av7110->ci_slot[info->num], sizeof(struct ca_slot_info)); + slot_num = array_index_nospec(info->num, MAX_CI_SLOTS); + + av7110->ci_slot[slot_num].num = info->num; + av7110->ci_slot[slot_num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ? + CA_CI_LINK : CA_CI; + memcpy(info, &av7110->ci_slot[slot_num], + sizeof(struct ca_slot_info)); break; } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 3dbeffc650d3..6c488b1e2624 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -593,7 +593,7 @@ vchiq_platform_init_state(struct vchiq_state *state) { struct vchiq_arm_state *platform_state; - platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL); + platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); if (!platform_state) return -ENOMEM; @@ -1731,7 +1731,7 @@ static int vchiq_probe(struct platform_device *pdev) return -ENOENT; } - mgmt = kzalloc(sizeof(*mgmt), GFP_KERNEL); + mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); if (!mgmt) return -ENOMEM; @@ -1789,8 +1789,6 @@ static void vchiq_remove(struct platform_device *pdev) arm_state = vchiq_platform_get_arm_state(&mgmt->state); kthread_stop(arm_state->ka_thread); - - kfree(mgmt); } static struct platform_driver vchiq_driver = { diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c index 5225b3621a56..d2d49264cf83 100644 --- a/drivers/thermal/qcom/lmh.c +++ b/drivers/thermal/qcom/lmh.c @@ -73,7 +73,14 @@ static struct irq_chip lmh_irq_chip = { static int lmh_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct lmh_hw_data *lmh_data = d->host_data; + static struct lock_class_key lmh_lock_key; + static struct lock_class_key lmh_request_key; + /* + * This lock class tells lockdep that GPIO irqs are in a different + * category than their parents, so it won't report false recursion. 
+ */ + irq_set_lockdep_class(irq, &lmh_lock_key, &lmh_request_key); irq_set_chip_and_handler(irq, &lmh_irq_chip, handle_simple_irq); irq_set_chip_data(irq, lmh_data); diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index a4caf7899f8e..07e09897165f 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -99,18 +99,15 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n struct device_node *trips; int ret, count; + *ntrips = 0; + trips = of_get_child_by_name(np, "trips"); - if (!trips) { - pr_err("Failed to find 'trips' node\n"); - return ERR_PTR(-EINVAL); - } + if (!trips) + return NULL; count = of_get_child_count(trips); - if (!count) { - pr_err("No trip point defined\n"); - ret = -EINVAL; - goto out_of_node_put; - } + if (!count) + return NULL; tt = kzalloc(sizeof(*tt) * count, GFP_KERNEL); if (!tt) { @@ -133,7 +130,6 @@ static struct thermal_trip *thermal_of_trips_init(struct device_node *np, int *n out_kfree: kfree(tt); - *ntrips = 0; out_of_node_put: of_node_put(trips); @@ -401,11 +397,14 @@ static struct thermal_zone_device *thermal_of_zone_register(struct device_node * trips = thermal_of_trips_init(np, &ntrips); if (IS_ERR(trips)) { - pr_err("Failed to find trip points for %pOFn id=%d\n", sensor, id); + pr_err("Failed to parse trip points for %pOFn id=%d\n", sensor, id); ret = PTR_ERR(trips); goto out_of_node_put; } + if (!trips) + pr_info("No trip points found for %pOFn id=%d\n", sensor, id); + ret = thermal_of_monitor_init(np, &delay, &pdelay); if (ret) { pr_err("Failed to initialize monitoring delays from %pOFn\n", np); diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index 7db9869a9f3f..89d2919d0193 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -532,6 +532,8 @@ int tb_retimer_scan(struct tb_port *port, bool add) } ret = 0; + if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)) + max = min(last_idx, max); /* Add retimers if they do not exist already */ for (i = 1; i <= max; i++) { diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 0a9b4aeb3fa1..402fdf8b1cde 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -48,7 +48,7 @@ enum usb4_ba_index { /* Delays in us used with usb4_port_wait_for_bit() */ #define USB4_PORT_DELAY 50 -#define USB4_PORT_SB_DELAY 5000 +#define USB4_PORT_SB_DELAY 1000 static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, u8 *status, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index f5846598d80e..abbe7135a977 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -8636,6 +8636,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba) ufshcd_init_clk_scaling_sysfs(hba); } + /* + * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev + * pointer and hence must only be started after the WLUN pointer has + * been initialized by ufshcd_scsi_add_wlus(). 
+ */ + schedule_delayed_work(&hba->ufs_rtc_update_work, + msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); + ufs_bsg_probe(hba); scsi_scan_host(hba->host); @@ -8795,8 +8803,6 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ufshcd_force_reset_auto_bkops(hba); ufshcd_set_timestamp_attr(hba); - schedule_delayed_work(&hba->ufs_rtc_update_work, - msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS)); /* Gear up to HS gear if supported */ if (hba->max_pwr_info.is_valid) { diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 427e5660f87c..98114c2827c0 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -2342,10 +2342,18 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) u32 reg; int i; - dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) & - DWC3_GUSB2PHYCFG_SUSPHY) || - (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) & - DWC3_GUSB3PIPECTL_SUSPHY); + if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) { + dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) & + DWC3_GUSB2PHYCFG_SUSPHY) || + (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) & + DWC3_GUSB3PIPECTL_SUSPHY); + /* + * TI AM62 platform requires SUSPHY to be + * enabled for system suspend to work. + */ + if (!dwc->susphy_state) + dwc3_enable_susphy(dwc, true); + } switch (dwc->current_dr_role) { case DWC3_GCTL_PRTCAP_DEVICE: @@ -2398,15 +2406,6 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) break; } - if (!PMSG_IS_AUTO(msg)) { - /* - * TI AM62 platform requires SUSPHY to be - * enabled for system suspend to work. - */ - if (!dwc->susphy_state) - dwc3_enable_susphy(dwc, true); - } - return 0; } diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c index d54283fd026b..05b6e7e52e02 100644 --- a/drivers/usb/musb/sunxi.c +++ b/drivers/usb/musb/sunxi.c @@ -293,8 +293,6 @@ static int sunxi_musb_exit(struct musb *musb) if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags)) sunxi_sram_release(musb->controller->parent); - devm_usb_put_phy(glue->dev, glue->xceiv); - return 0; } diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index c7d6b5e3f898..28c71d99e857 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -770,11 +770,12 @@ static void edge_bulk_out_data_callback(struct urb *urb) static void edge_bulk_out_cmd_callback(struct urb *urb) { struct edgeport_port *edge_port = urb->context; + struct device *dev = &urb->dev->dev; int status = urb->status; atomic_dec(&CmdUrbs); - dev_dbg(&urb->dev->dev, "%s - FREE URB %p (outstanding %d)\n", - __func__, urb, atomic_read(&CmdUrbs)); + dev_dbg(dev, "%s - FREE URB %p (outstanding %d)\n", __func__, urb, + atomic_read(&CmdUrbs)); /* clean up the transfer buffer */ @@ -784,8 +785,7 @@ static void edge_bulk_out_cmd_callback(struct urb *urb) usb_free_urb(urb); if (status) { - dev_dbg(&urb->dev->dev, - "%s - nonzero write bulk status received: %d\n", + dev_dbg(dev, "%s - nonzero write bulk status received: %d\n", __func__, status); return; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 4f18f189f309..9ba5584061c8 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_VENDOR_ID 0x2c7c /* These Quectel products use Quectel's vendor ID */ #define QUECTEL_PRODUCT_EC21 0x0121 +#define QUECTEL_PRODUCT_RG650V 0x0122 #define QUECTEL_PRODUCT_EM061K_LTA 0x0123 #define QUECTEL_PRODUCT_EM061K_LMS 
0x0124 #define QUECTEL_PRODUCT_EC25 0x0125 @@ -1273,6 +1274,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG916Q, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, @@ -2320,6 +2323,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0xff, 0x30) }, /* Fibocom FG132 Diag */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0xff, 0x40) }, /* Fibocom FG132 AT */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0112, 0xff, 0, 0) }, /* Fibocom FG132 NMEA */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */ .driver_info = RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index c7de9585feb2..13c664317a05 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ {DEVICE_SWI(0x1199, 0x90d2)}, /* Sierra Wireless EM9191 QDL */ + {DEVICE_SWI(0x1199, 0x90e4)}, /* Sierra Wireless EM86xx QDL*/ + {DEVICE_SWI(0x1199, 0x90e5)}, /* Sierra Wireless EM86xx */ {DEVICE_SWI(0x1199, 0xc080)}, /* Sierra Wireless EM7590 QDL */ {DEVICE_SWI(0x1199, 0xc081)}, /* Sierra Wireless EM7590 */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c index 5b7f52b74a40..726423684bae 100644 --- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c +++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c @@ -227,6 +227,10 @@ qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pd spin_lock_irqsave(&pmic_typec_pdphy->lock, flags); + hdr_len = sizeof(msg->header); + txbuf_len = pd_header_cnt_le(msg->header) * 4; + txsize_len = hdr_len + txbuf_len - 1; + ret = regmap_read(pmic_typec_pdphy->regmap, pmic_typec_pdphy->base + USB_PDPHY_RX_ACKNOWLEDGE_REG, &val); @@ -244,10 +248,6 @@ qcom_pmic_typec_pdphy_pd_transmit_payload(struct pmic_typec_pdphy *pmic_typec_pd if (ret) goto done; - hdr_len = sizeof(msg->header); - txbuf_len = pd_header_cnt_le(msg->header) * 4; - txsize_len = hdr_len + txbuf_len - 1; - /* Write message header sizeof(u16) to USB_PDPHY_TX_BUFFER_HDR_REG */ ret = regmap_bulk_write(pmic_typec_pdphy->regmap, pmic_typec_pdphy->base + USB_PDPHY_TX_BUFFER_HDR_REG, diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index ba58d11907bc..bccfc03b5986 100644 --- 
a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -482,6 +482,8 @@ static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc, port = uc->orig; new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd); + if (new_cam >= ARRAY_SIZE(uc->updated)) + return; new_port = &uc->updated[new_cam]; cam = new_port->linked_idx; enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd); diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 472daa588a9d..d5507b63b6cd 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -108,7 +108,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev) u32 i; ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos); - if (ret < 0) { + if (ret) { IFCVF_ERR(pdev, "Failed to read PCI capability list\n"); return -EIO; } diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 2dd21e0b399e..7d0c83b5b071 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -373,7 +373,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr struct page *pg; unsigned int nsg; int sglen; - u64 pa; + u64 pa, offset; u64 paend; struct scatterlist *sg; struct device *dma = mvdev->vdev.dma_dev; @@ -396,8 +396,10 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr sg = mr->sg_head.sgl; for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) { - paend = map->addr + maplen(map, mr); - for (pa = map->addr; pa < paend; pa += sglen) { + offset = mr->start > map->start ? mr->start - map->start : 0; + pa = map->addr + offset; + paend = map->addr + offset + maplen(map, mr); + for (; pa < paend; pa += sglen) { pg = pfn_to_page(__phys_to_pfn(pa)); if (!sg) { mlx5_vdpa_warn(mvdev, "sg null. 
start 0x%llx, end 0x%llx\n", diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index dee019977716..5f581e71e201 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -3963,28 +3963,28 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name, mvdev->vdev.dma_dev = &mdev->pdev->dev; err = mlx5_vdpa_alloc_resources(&ndev->mvdev); if (err) - goto err_mpfs; + goto err_alloc; err = mlx5_vdpa_init_mr_resources(mvdev); if (err) - goto err_res; + goto err_alloc; if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { err = mlx5_vdpa_create_dma_mr(mvdev); if (err) - goto err_mr_res; + goto err_alloc; } err = alloc_fixed_resources(ndev); if (err) - goto err_mr; + goto err_alloc; ndev->cvq_ent.mvdev = mvdev; INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); if (!mvdev->wq) { err = -ENOMEM; - goto err_res2; + goto err_alloc; } mvdev->vdev.mdev = &mgtdev->mgtdev; @@ -4010,17 +4010,6 @@ err_setup_vq_res: _vdpa_unregister_device(&mvdev->vdev); err_reg: destroy_workqueue(mvdev->wq); -err_res2: - free_fixed_resources(ndev); -err_mr: - mlx5_vdpa_clean_mrs(mvdev); -err_mr_res: - mlx5_vdpa_destroy_mr_resources(mvdev); -err_res: - mlx5_vdpa_free_resources(&ndev->mvdev); -err_mpfs: - if (!is_zero_ether_addr(config->mac)) - mlx5_mpfs_del_mac(pfmdev, config->mac); err_alloc: put_device(&mvdev->vdev.dev); return err; diff --git a/drivers/vdpa/solidrun/snet_main.c b/drivers/vdpa/solidrun/snet_main.c index 99428a04068d..c8b74980dbd1 100644 --- a/drivers/vdpa/solidrun/snet_main.c +++ b/drivers/vdpa/solidrun/snet_main.c @@ -555,7 +555,7 @@ static const struct vdpa_config_ops snet_config_ops = { static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet) { - char name[50]; + char *name; int ret, i, mask = 0; /* We don't know which BAR will be used to communicate.. * We will map every bar with len > 0. @@ -573,7 +573,10 @@ static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet) return -ENODEV; } - snprintf(name, sizeof(name), "psnet[%s]-bars", pci_name(pdev)); + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev)); + if (!name) + return -ENOMEM; + ret = pcim_iomap_regions(pdev, mask, name); if (ret) { SNET_ERR(pdev, "Failed to request and map PCI BARs\n"); @@ -590,10 +593,13 @@ static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet) static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet) { - char name[50]; + char *name; int ret; - snprintf(name, sizeof(name), "snet[%s]-bar", pci_name(pdev)); + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "snet[%s]-bars", pci_name(pdev)); + if (!name) + return -ENOMEM; + /* Request and map BAR */ ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name); if (ret) { diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index ac4ab22f7d8b..16380764275e 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -612,7 +612,11 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto mdev_err; } - mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL); + /* + * id_table should be a null terminated array, so allocate one additional + * entry here, see vdpa_mgmtdev_get_classes(). 
+ */ + mdev_id = kcalloc(2, sizeof(struct virtio_device_id), GFP_KERNEL); if (!mdev_id) { err = -ENOMEM; goto mdev_id_err; @@ -632,8 +636,8 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto probe_err; } - mdev_id->device = mdev->id.device; - mdev_id->vendor = mdev->id.vendor; + mdev_id[0].device = mdev->id.device; + mdev_id[0].vendor = mdev->id.vendor; mgtdev->id_table = mdev_id; mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev); mgtdev->supported_features = vp_modern_get_features(mdev); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index c44d8ba00c02..88074451dd61 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -24,6 +24,16 @@ MODULE_PARM_DESC(force_legacy, "Force legacy mode for transitional virtio 1 devices"); #endif +bool vp_is_avq(struct virtio_device *vdev, unsigned int index) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) + return false; + + return index == vp_dev->admin_vq.vq_index; +} + /* wait for pending irq handlers */ void vp_synchronize_vectors(struct virtio_device *vdev) { @@ -234,10 +244,9 @@ out_info: return vq; } -static void vp_del_vq(struct virtqueue *vq) +static void vp_del_vq(struct virtqueue *vq, struct virtio_pci_vq_info *info) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); - struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; unsigned long flags; /* @@ -258,13 +267,16 @@ static void vp_del_vq(struct virtqueue *vq) void vp_del_vqs(struct virtio_device *vdev) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info; struct virtqueue *vq, *n; int i; list_for_each_entry_safe(vq, n, &vdev->vqs, list) { - if (vp_dev->per_vq_vectors) { - int v = vp_dev->vqs[vq->index]->msix_vector; + info = vp_is_avq(vdev, vq->index) ? 
vp_dev->admin_vq.info : + vp_dev->vqs[vq->index]; + if (vp_dev->per_vq_vectors) { + int v = info->msix_vector; if (v != VIRTIO_MSI_NO_VECTOR && !vp_is_slow_path_vector(v)) { int irq = pci_irq_vector(vp_dev->pci_dev, v); @@ -273,7 +285,7 @@ void vp_del_vqs(struct virtio_device *vdev) free_irq(irq, vq); } } - vp_del_vq(vq); + vp_del_vq(vq, info); } vp_dev->per_vq_vectors = false; @@ -354,7 +366,7 @@ vp_find_one_vq_msix(struct virtio_device *vdev, int queue_idx, vring_interrupt, 0, vp_dev->msix_names[msix_vec], vq); if (err) { - vp_del_vq(vq); + vp_del_vq(vq, *p_info); return ERR_PTR(err); } diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index 1d9c49947f52..8beecf23ec85 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -178,6 +178,7 @@ struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev); #define VIRTIO_ADMIN_CMD_BITMAP 0 #endif +bool vp_is_avq(struct virtio_device *vdev, unsigned int index); void vp_modern_avq_done(struct virtqueue *vq); int vp_modern_admin_cmd_exec(struct virtio_device *vdev, struct virtio_admin_cmd *cmd); diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 9193c30d640a..4fbcbc7a9ae1 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -43,16 +43,6 @@ static int vp_avq_index(struct virtio_device *vdev, u16 *index, u16 *num) return 0; } -static bool vp_is_avq(struct virtio_device *vdev, unsigned int index) -{ - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) - return false; - - return index == vp_dev->admin_vq.vq_index; -} - void vp_modern_avq_done(struct virtqueue *vq) { struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); @@ -245,7 +235,7 @@ static void vp_modern_avq_cleanup(struct virtio_device *vdev) if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ)) return; - vq = vp_dev->vqs[vp_dev->admin_vq.vq_index]->vq; + vq = vp_dev->admin_vq.info->vq; if (!vq) return; diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c index 47455a85c909..654a58132a4d 100644 --- a/fs/bcachefs/backpointers.c +++ b/fs/bcachefs/backpointers.c @@ -52,6 +52,12 @@ int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k, enum bch_validate_flags flags) { struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k); + int ret = 0; + + bkey_fsck_err_on(bp.v->level > BTREE_MAX_DEPTH, + c, backpointer_level_bad, + "backpointer level bad: %u >= %u", + bp.v->level, BTREE_MAX_DEPTH); rcu_read_lock(); struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode); @@ -64,7 +70,6 @@ int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k, struct bpos bucket = bp_pos_to_bucket(ca, bp.k->p); struct bpos bp_pos = bucket_pos_to_bp_noerror(ca, bucket, bp.v->bucket_offset); rcu_read_unlock(); - int ret = 0; bkey_fsck_err_on((bp.v->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) >= ca->mi.bucket_size || !bpos_eq(bp.k->p, bp_pos), @@ -947,9 +952,13 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c) static int check_one_backpointer(struct btree_trans *trans, struct bbpos start, struct bbpos end, - struct bkey_s_c_backpointer bp, + struct bkey_s_c bp_k, struct bkey_buf *last_flushed) { + if (bp_k.k->type != KEY_TYPE_backpointer) + return 0; + + struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k); struct bch_fs *c = trans->c; struct btree_iter iter; struct bbpos pos = bp_to_bbpos(*bp.v); @@ -1004,9 +1013,7 @@ static int 
bch2_check_backpointers_to_extents_pass(struct btree_trans *trans, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ progress_update_iter(trans, &progress, &iter, "backpointers_to_extents"); - check_one_backpointer(trans, start, end, - bkey_s_c_to_backpointer(k), - &last_flushed); + check_one_backpointer(trans, start, end, k, &last_flushed); })); bch2_bkey_buf_exit(&last_flushed, c); diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c index 587d7318a2e8..995ba32e9b6e 100644 --- a/fs/bcachefs/bkey.c +++ b/fs/bcachefs/bkey.c @@ -643,7 +643,7 @@ int bch2_bkey_format_invalid(struct bch_fs *c, enum bch_validate_flags flags, struct printbuf *err) { - unsigned i, bits = KEY_PACKED_BITS_START; + unsigned bits = KEY_PACKED_BITS_START; if (f->nr_fields != BKEY_NR_FIELDS) { prt_printf(err, "incorrect number of fields: got %u, should be %u", @@ -655,9 +655,8 @@ int bch2_bkey_format_invalid(struct bch_fs *c, * Verify that the packed format can't represent fields larger than the * unpacked format: */ - for (i = 0; i < f->nr_fields; i++) { - if ((!c || c->sb.version_min >= bcachefs_metadata_version_snapshot) && - bch2_bkey_format_field_overflows(f, i)) { + for (unsigned i = 0; i < f->nr_fields; i++) { + if (bch2_bkey_format_field_overflows(f, i)) { unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i]; u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1)); unsigned packed_bits = min(64, f->bits_per_field[i]); diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 6e4afb2b5441..7123019ab3bc 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -59,16 +59,38 @@ static inline size_t btree_cache_can_free(struct btree_cache_list *list) static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) { + BUG_ON(!list_empty(&b->list)); + if (b->c.lock.readers) - list_move(&b->list, &bc->freed_pcpu); + list_add(&b->list, &bc->freed_pcpu); else - list_move(&b->list, &bc->freed_nonpcpu); + list_add(&b->list, &bc->freed_nonpcpu); +} + +static void __bch2_btree_node_to_freelist(struct btree_cache *bc, struct btree *b) +{ + BUG_ON(!list_empty(&b->list)); + BUG_ON(!b->data); + + bc->nr_freeable++; + list_add(&b->list, &bc->freeable); } -static void btree_node_data_free(struct bch_fs *c, struct btree *b) +void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) { struct btree_cache *bc = &c->btree_cache; + mutex_lock(&bc->lock); + __bch2_btree_node_to_freelist(bc, b); + mutex_unlock(&bc->lock); + + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); +} + +static void __btree_node_data_free(struct btree_cache *bc, struct btree *b) +{ + BUG_ON(!list_empty(&b->list)); BUG_ON(btree_node_hashed(b)); /* @@ -94,11 +116,17 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b) #endif b->aux_data = NULL; - bc->nr_freeable--; - btree_node_to_freedlist(bc, b); } +static void btree_node_data_free(struct btree_cache *bc, struct btree *b) +{ + BUG_ON(list_empty(&b->list)); + list_del_init(&b->list); + --bc->nr_freeable; + __btree_node_data_free(bc, b); +} + static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg, const void *obj) { @@ -174,21 +202,10 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) bch2_btree_lock_init(&b->c, 0); - bc->nr_freeable++; - list_add(&b->list, &bc->freeable); + __bch2_btree_node_to_freelist(bc, b); return b; } -void bch2_btree_node_to_freelist(struct bch_fs *c, struct btree *b) -{ - mutex_lock(&c->btree_cache.lock); - list_move(&b->list, 
&c->btree_cache.freeable); - mutex_unlock(&c->btree_cache.lock); - - six_unlock_write(&b->c.lock); - six_unlock_intent(&b->c.lock); -} - static inline bool __btree_node_pinned(struct btree_cache *bc, struct btree *b) { struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p); @@ -236,11 +253,11 @@ void bch2_btree_cache_unpin(struct bch_fs *c) /* Btree in memory cache - hash table */ -void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) +void __bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) { lockdep_assert_held(&bc->lock); - int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); + int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); BUG_ON(ret); /* Cause future lookups for this node to fail: */ @@ -248,17 +265,22 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) if (b->c.btree_id < BTREE_ID_NR) --bc->nr_by_btree[b->c.btree_id]; + --bc->live[btree_node_pinned(b)].nr; + list_del_init(&b->list); +} - bc->live[btree_node_pinned(b)].nr--; - bc->nr_freeable++; - list_move(&b->list, &bc->freeable); +void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) +{ + __bch2_btree_node_hash_remove(bc, b); + __bch2_btree_node_to_freelist(bc, b); } int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) { + BUG_ON(!list_empty(&b->list)); BUG_ON(b->hash_val); - b->hash_val = btree_ptr_hash_val(&b->key); + b->hash_val = btree_ptr_hash_val(&b->key); int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash, bch_btree_cache_params); if (ret) @@ -270,10 +292,8 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b) bool p = __btree_node_pinned(bc, b); mod_bit(BTREE_NODE_pinned, &b->flags, p); - list_move_tail(&b->list, &bc->live[p].list); + list_add_tail(&b->list, &bc->live[p].list); bc->live[p].nr++; - - bc->nr_freeable--; return 0; } @@ -485,7 +505,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink, goto out; if (!btree_node_reclaim(c, b, true)) { - btree_node_data_free(c, b); + btree_node_data_free(bc, b); six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); freed++; @@ -501,10 +521,10 @@ restart: bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++; --touched;; } else if (!btree_node_reclaim(c, b, true)) { - bch2_btree_node_hash_remove(bc, b); + __bch2_btree_node_hash_remove(bc, b); + __btree_node_data_free(bc, b); freed++; - btree_node_data_free(c, b); bc->nr_freed++; six_unlock_write(&b->c.lock); @@ -587,7 +607,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c) BUG_ON(btree_node_read_in_flight(b) || btree_node_write_in_flight(b)); - btree_node_data_free(c, b); + btree_node_data_free(bc, b); } BUG_ON(!bch2_journal_error(&c->journal) && @@ -786,8 +806,8 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea BUG_ON(!six_trylock_intent(&b->c.lock)); BUG_ON(!six_trylock_write(&b->c.lock)); -got_node: +got_node: /* * btree_free() doesn't free memory; it sticks the node on the end of * the list. 
Check if there's any freed nodes there: @@ -796,7 +816,12 @@ got_node: if (!btree_node_reclaim(c, b2, false)) { swap(b->data, b2->data); swap(b->aux_data, b2->aux_data); + + list_del_init(&b2->list); + --bc->nr_freeable; btree_node_to_freedlist(bc, b2); + mutex_unlock(&bc->lock); + six_unlock_write(&b2->c.lock); six_unlock_intent(&b2->c.lock); goto got_mem; @@ -810,11 +835,8 @@ got_node: goto err; } - mutex_lock(&bc->lock); - bc->nr_freeable++; got_mem: - mutex_unlock(&bc->lock); - + BUG_ON(!list_empty(&b->list)); BUG_ON(btree_node_hashed(b)); BUG_ON(btree_node_dirty(b)); BUG_ON(btree_node_write_in_flight(b)); @@ -845,7 +867,7 @@ err: if (bc->alloc_lock == current) { b2 = btree_node_cannibalize(c); clear_btree_node_just_written(b2); - bch2_btree_node_hash_remove(bc, b2); + __bch2_btree_node_hash_remove(bc, b2); if (b) { swap(b->data, b2->data); @@ -855,9 +877,9 @@ err: six_unlock_intent(&b2->c.lock); } else { b = b2; - list_del_init(&b->list); } + BUG_ON(!list_empty(&b->list)); mutex_unlock(&bc->lock); trace_and_count(c, btree_cache_cannibalize, trans); @@ -936,7 +958,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans, b->hash_val = 0; mutex_lock(&bc->lock); - list_add(&b->list, &bc->freeable); + __bch2_btree_node_to_freelist(bc, b); mutex_unlock(&bc->lock); six_unlock_write(&b->c.lock); @@ -1312,9 +1334,12 @@ int bch2_btree_node_prefetch(struct btree_trans *trans, b = bch2_btree_node_fill(trans, path, k, btree_id, level, SIX_LOCK_read, false); - if (!IS_ERR_OR_NULL(b)) + int ret = PTR_ERR_OR_ZERO(b); + if (ret) + return ret; + if (b) six_unlock_read(&b->c.lock); - return bch2_trans_relock(trans) ?: PTR_ERR_OR_ZERO(b); + return 0; } void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k) @@ -1353,7 +1378,7 @@ wait_on_io: mutex_lock(&bc->lock); bch2_btree_node_hash_remove(bc, b); - btree_node_data_free(c, b); + btree_node_data_free(bc, b); mutex_unlock(&bc->lock); out: six_unlock_write(&b->c.lock); diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h index 367acd217c6a..66e86d1a178d 100644 --- a/fs/bcachefs/btree_cache.h +++ b/fs/bcachefs/btree_cache.h @@ -14,7 +14,9 @@ void bch2_recalc_btree_reserve(struct bch_fs *); void bch2_btree_node_to_freelist(struct bch_fs *, struct btree *); +void __bch2_btree_node_hash_remove(struct btree_cache *, struct btree *); void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *); + int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *); int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *, unsigned, enum btree_id); diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index 0ca3feeb42c8..81dcf9e512c0 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -182,7 +182,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max) bch2_btree_node_drop_keys_outside_node(b); mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); + __bch2_btree_node_hash_remove(&c->btree_cache, b); bkey_copy(&b->key, &new->k_i); ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 6296a11ccb09..839d68802e42 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -733,11 +733,8 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, c, ca, b, i, NULL, bset_past_end_of_btree_node, "bset past end of btree node (offset %u len %u but written %zu)", - offset, sectors, ptr_written ?: btree_sectors(c))) { + 
offset, sectors, ptr_written ?: btree_sectors(c))) i->u64s = 0; - ret = 0; - goto out; - } btree_err_on(offset && !i->u64s, -BCH_ERR_btree_node_read_err_fixable, @@ -829,7 +826,6 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca, BSET_BIG_ENDIAN(i), write, &bn->format); } -out: fsck_err: printbuf_exit(&buf2); printbuf_exit(&buf1); diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c index a7aedb134e9f..30131c3bdd97 100644 --- a/fs/bcachefs/btree_node_scan.c +++ b/fs/bcachefs/btree_node_scan.c @@ -186,7 +186,7 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, .ptrs[0].type = 1 << BCH_EXTENT_ENTRY_ptr, .ptrs[0].offset = offset, .ptrs[0].dev = ca->dev_idx, - .ptrs[0].gen = *bucket_gen(ca, sector_to_bucket(ca, offset)), + .ptrs[0].gen = bucket_gen_get(ca, sector_to_bucket(ca, offset)), }; rcu_read_unlock(); diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 64f0928e1137..d596ef93239f 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -237,10 +237,6 @@ static void __btree_node_free(struct btree_trans *trans, struct btree *b) BUG_ON(b->will_make_reachable); clear_btree_node_noevict(b); - - mutex_lock(&c->btree_cache.lock); - list_move(&b->list, &c->btree_cache.freeable); - mutex_unlock(&c->btree_cache.lock); } static void bch2_btree_node_free_inmem(struct btree_trans *trans, @@ -252,12 +248,12 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans, bch2_btree_node_lock_write_nofail(trans, path, &b->c); + __btree_node_free(trans, b); + mutex_lock(&c->btree_cache.lock); bch2_btree_node_hash_remove(&c->btree_cache, b); mutex_unlock(&c->btree_cache.lock); - __btree_node_free(trans, b); - six_unlock_write(&b->c.lock); mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); @@ -289,7 +285,7 @@ static void bch2_btree_node_free_never_used(struct btree_update *as, clear_btree_node_need_write(b); mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); + __bch2_btree_node_hash_remove(&c->btree_cache, b); mutex_unlock(&c->btree_cache.lock); BUG_ON(p->nr >= ARRAY_SIZE(p->b)); @@ -521,8 +517,7 @@ static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans * btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); __btree_node_free(trans, b); - six_unlock_write(&b->c.lock); - six_unlock_intent(&b->c.lock); + bch2_btree_node_to_freelist(c, b); } } } @@ -1434,6 +1429,15 @@ bch2_btree_insert_keys_interior(struct btree_update *as, } } +static bool key_deleted_in_insert(struct keylist *insert_keys, struct bpos pos) +{ + if (insert_keys) + for_each_keylist_key(insert_keys, k) + if (bkey_deleted(&k->k) && bpos_eq(k->k.p, pos)) + return true; + return false; +} + /* * Move keys from n1 (original replacement node, now lower node) to n2 (higher * node) @@ -1441,7 +1445,8 @@ bch2_btree_insert_keys_interior(struct btree_update *as, static void __btree_split_node(struct btree_update *as, struct btree_trans *trans, struct btree *b, - struct btree *n[2]) + struct btree *n[2], + struct keylist *insert_keys) { struct bkey_packed *k; struct bpos n1_pos = POS_MIN; @@ -1476,7 +1481,8 @@ static void __btree_split_node(struct btree_update *as, if (b->c.level && u64s < n1_u64s && u64s + k->u64s >= n1_u64s && - bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p)) + (bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, 
uk.p) || + key_deleted_in_insert(insert_keys, uk.p))) n1_u64s += k->u64s; i = u64s >= n1_u64s; @@ -1603,7 +1609,7 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level); n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level); - __btree_split_node(as, trans, b, n); + __btree_split_node(as, trans, b, n, keys); if (keys) { btree_split_insert_keys(as, trans, path, n1, keys); @@ -2392,7 +2398,8 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, if (new_hash) { mutex_lock(&c->btree_cache.lock); bch2_btree_node_hash_remove(&c->btree_cache, new_hash); - bch2_btree_node_hash_remove(&c->btree_cache, b); + + __bch2_btree_node_hash_remove(&c->btree_cache, b); bkey_copy(&b->key, new_key); ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index 3f56b584f8ec..1639c60dffa0 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -277,6 +277,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags); int ret = 0; + ret = bch2_journal_error(&c->journal); + if (ret) + return ret; + bch2_trans_unlock(trans); bch2_trans_begin(trans); @@ -491,7 +495,8 @@ static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq) return ret; } -static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq) +static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq, + bool *did_work) { struct bch_fs *c = trans->c; struct btree_write_buffer *wb = &c->btree_write_buffer; @@ -502,6 +507,8 @@ static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq) fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq); + *did_work |= wb->inc.keys.nr || wb->flushing.keys.nr; + /* * On memory allocation failure, bch2_btree_write_buffer_flush_locked() * is not guaranteed to empty wb->inc: @@ -521,17 +528,34 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *j, struct journal_entry_pin *_pin, u64 seq) { struct bch_fs *c = container_of(j, struct bch_fs, journal); + bool did_work = false; - return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq)); + return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq, &did_work)); } int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans) { struct bch_fs *c = trans->c; + bool did_work = false; trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_); - return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal)); + return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal), &did_work); +} + +/* + * The write buffer requires flushing when going RO: keys in the journal for the + * write buffer don't have a journal pin yet + */ +bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *c) +{ + if (bch2_journal_error(&c->journal)) + return false; + + bool did_work = false; + bch2_trans_run(c, btree_write_buffer_flush_seq(trans, + journal_cur_seq(&c->journal), &did_work)); + return did_work; } int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans) diff --git a/fs/bcachefs/btree_write_buffer.h b/fs/bcachefs/btree_write_buffer.h index 725e79654216..d535cea28bde 100644 --- a/fs/bcachefs/btree_write_buffer.h +++ b/fs/bcachefs/btree_write_buffer.h @@ -21,6 +21,7 @@ static inline bool bch2_btree_write_buffer_must_wait(struct bch_fs *c) struct btree_trans; int 
bch2_btree_write_buffer_flush_sync(struct btree_trans *); +bool bch2_btree_write_buffer_flush_going_ro(struct bch_fs *); int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *); int bch2_btree_write_buffer_tryflush(struct btree_trans *); diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h index fd5e6ccad45e..ccc78bfe2fd4 100644 --- a/fs/bcachefs/buckets.h +++ b/fs/bcachefs/buckets.h @@ -103,12 +103,18 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b) return gens->b + b; } -static inline u8 bucket_gen_get(struct bch_dev *ca, size_t b) +static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b) +{ + u8 *gen = bucket_gen(ca, b); + return gen ? *gen : -1; +} + +static inline int bucket_gen_get(struct bch_dev *ca, size_t b) { rcu_read_lock(); - u8 gen = *bucket_gen(ca, b); + int ret = bucket_gen_get_rcu(ca, b); rcu_read_unlock(); - return gen; + return ret; } static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca, @@ -169,10 +175,8 @@ static inline int gen_after(u8 a, u8 b) static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr) { - u8 *gen = bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)); - if (!gen) - return -1; - return gen_after(*gen, ptr->gen); + int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr)); + return gen < 0 ? gen : gen_after(gen, ptr->gen); } /** @@ -184,7 +188,6 @@ static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr rcu_read_lock(); int ret = dev_ptr_stale_rcu(ca, ptr); rcu_read_unlock(); - return ret; } diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h index a1bc6c7a8ba0..9c4fe5cdbfb7 100644 --- a/fs/bcachefs/errcode.h +++ b/fs/bcachefs/errcode.h @@ -84,6 +84,7 @@ x(ENOMEM, ENOMEM_dev_alloc) \ x(ENOMEM, ENOMEM_disk_accounting) \ x(ENOMEM, ENOMEM_stripe_head_alloc) \ + x(ENOMEM, ENOMEM_journal_read_bucket) \ x(ENOSPC, ENOSPC_disk_reservation) \ x(ENOSPC, ENOSPC_bucket_alloc) \ x(ENOSPC, ENOSPC_disk_label_add) \ diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index c4e91d123849..37e3d69bec06 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -1364,7 +1364,7 @@ void bch2_ptr_swab(struct bkey_s k) for (entry = ptrs.start; entry < ptrs.end; entry = extent_entry_next(entry)) { - switch (extent_entry_type(entry)) { + switch (__extent_entry_type(entry)) { case BCH_EXTENT_ENTRY_ptr: break; case BCH_EXTENT_ENTRY_crc32: @@ -1384,6 +1384,9 @@ void bch2_ptr_swab(struct bkey_s k) break; case BCH_EXTENT_ENTRY_rebalance: break; + default: + /* Bad entry type: will be caught by validate() */ + return; } } } diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index fc246f342820..b3b934a87c6d 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -262,7 +262,8 @@ err: bio_free_pages(&(*rbio)->bio); kfree(*rbio); *rbio = NULL; - kfree(op); + /* We may have added to the rhashtable and thus need rcu freeing: */ + kfree_rcu(op, rcu); bch2_write_ref_put(c, BCH_WRITE_REF_promote); return ERR_PTR(ret); } @@ -802,16 +803,15 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, PTR_BUCKET_POS(ca, &ptr), BTREE_ITER_cached); - u8 *gen = bucket_gen(ca, iter.pos.offset); - if (gen) { - + int gen = bucket_gen_get(ca, iter.pos.offset); + if (gen >= 0) { prt_printf(&buf, "Attempting to read from stale dirty pointer:\n"); printbuf_indent_add(&buf, 2); bch2_bkey_val_to_text(&buf, c, k); prt_newline(&buf); - prt_printf(&buf, "memory gen: %u", *gen); + prt_printf(&buf, "memory gen: %u", gen); ret = lockrestart_do(trans, bkey_err(k = 
bch2_btree_iter_peek_slot(&iter))); if (!ret) { diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index 8609e25e450f..96720adcfee0 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -1300,11 +1300,8 @@ retry: bucket_to_u64(i->b), BUCKET_NOCOW_LOCK_UPDATE); - rcu_read_lock(); - u8 *gen = bucket_gen(ca, i->b.offset); - stale = !gen ? -1 : gen_after(*gen, i->gen); - rcu_read_unlock(); - + int gen = bucket_gen_get(ca, i->b.offset); + stale = gen < 0 ? gen : gen_after(gen, i->gen); if (unlikely(stale)) { stale_at = i; goto err_bucket_stale; diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index 954f6a96e0f4..fb35dd336331 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -708,6 +708,9 @@ static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs container_of(entry, struct jset_entry_dev_usage, entry); unsigned i, nr_types = jset_entry_dev_usage_nr_types(u); + if (vstruct_bytes(entry) < sizeof(*u)) + return; + prt_printf(out, "dev=%u", le32_to_cpu(u->dev)); printbuf_indent_add(out, 2); @@ -1012,6 +1015,8 @@ reread: nr_bvecs = buf_pages(buf->data, sectors_read << 9); bio = bio_kmalloc(nr_bvecs, GFP_KERNEL); + if (!bio) + return -BCH_ERR_ENOMEM_journal_read_bucket; bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ); bio->bi_iter.bi_sector = offset; diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c index 6673cbd8bdb9..0e2ee262fbd4 100644 --- a/fs/bcachefs/opts.c +++ b/fs/bcachefs/opts.c @@ -226,7 +226,7 @@ const struct bch_option bch2_opt_table[] = { #define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, \ .min = _min, .max = _max #define OPT_STR(_choices) .type = BCH_OPT_STR, \ - .min = 0, .max = ARRAY_SIZE(_choices), \ + .min = 0, .max = ARRAY_SIZE(_choices) - 1, \ .choices = _choices #define OPT_STR_NOLIMIT(_choices) .type = BCH_OPT_STR, \ .min = 0, .max = U64_MAX, \ @@ -428,7 +428,7 @@ void bch2_opt_to_text(struct printbuf *out, prt_printf(out, "%lli", v); break; case BCH_OPT_STR: - if (v < opt->min || v >= opt->max - 1) + if (v < opt->min || v >= opt->max) prt_printf(out, "(invalid option %lli)", v); else if (flags & OPT_SHOW_FULL_LIST) prt_string_option(out, opt->choices, v); diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c index 32d15aacc069..3c7f941dde39 100644 --- a/fs/bcachefs/recovery.c +++ b/fs/bcachefs/recovery.c @@ -862,6 +862,13 @@ use_clean: if (ret) goto err; + /* + * Normally set by the appropriate recovery pass: when cleared, this + * indicates we're in early recovery and btree updates should be done by + * being applied to the journal replay keys. _Must_ be cleared before + * multithreaded use: + */ + set_bit(BCH_FS_may_go_rw, &c->flags); clear_bit(BCH_FS_fsck_running, &c->flags); /* in case we don't run journal replay, i.e. norecovery mode */ diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c index 735b8adc8f9d..dff589ddc984 100644 --- a/fs/bcachefs/recovery_passes.c +++ b/fs/bcachefs/recovery_passes.c @@ -27,6 +27,12 @@ const char * const bch2_recovery_passes[] = { NULL }; +/* Fake recovery pass, so that scan_for_btree_nodes isn't 0: */ +static int bch2_recovery_pass_empty(struct bch_fs *c) +{ + return 0; +} + static int bch2_set_may_go_rw(struct bch_fs *c) { struct journal_keys *keys = &c->journal_keys; @@ -221,6 +227,12 @@ int bch2_run_recovery_passes(struct bch_fs *c) { int ret = 0; + /* + * We can't allow set_may_go_rw to be excluded; that would cause us to + * use the journal replay keys for updates where it's not expected. 
+ */ + c->opts.recovery_passes_exclude &= ~BCH_RECOVERY_PASS_set_may_go_rw; + while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) { if (c->opts.recovery_pass_last && c->curr_recovery_pass > c->opts.recovery_pass_last) diff --git a/fs/bcachefs/recovery_passes_types.h b/fs/bcachefs/recovery_passes_types.h index 9d96c06e365c..94dc20ca2065 100644 --- a/fs/bcachefs/recovery_passes_types.h +++ b/fs/bcachefs/recovery_passes_types.h @@ -13,6 +13,7 @@ * must never change: */ #define BCH_RECOVERY_PASSES() \ + x(recovery_pass_empty, 41, PASS_SILENT) \ x(scan_for_btree_nodes, 37, 0) \ x(check_topology, 4, 0) \ x(accounting_read, 39, PASS_ALWAYS) \ diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h index 937275d061fe..9feb6739f77a 100644 --- a/fs/bcachefs/sb-errors_format.h +++ b/fs/bcachefs/sb-errors_format.h @@ -136,7 +136,9 @@ enum bch_fsck_flags { x(bucket_gens_nonzero_for_invalid_buckets, 122, FSCK_AUTOFIX) \ x(need_discard_freespace_key_to_invalid_dev_bucket, 123, 0) \ x(need_discard_freespace_key_bad, 124, 0) \ + x(discarding_bucket_not_in_need_discard_btree, 291, 0) \ x(backpointer_bucket_offset_wrong, 125, 0) \ + x(backpointer_level_bad, 294, 0) \ x(backpointer_to_missing_device, 126, 0) \ x(backpointer_to_missing_alloc, 127, 0) \ x(backpointer_to_missing_ptr, 128, 0) \ @@ -177,7 +179,9 @@ enum bch_fsck_flags { x(ptr_stripe_redundant, 163, 0) \ x(reservation_key_nr_replicas_invalid, 164, 0) \ x(reflink_v_refcount_wrong, 165, 0) \ + x(reflink_v_pos_bad, 292, 0) \ x(reflink_p_to_missing_reflink_v, 166, 0) \ + x(reflink_refcount_underflow, 293, 0) \ x(stripe_pos_bad, 167, 0) \ x(stripe_val_size_bad, 168, 0) \ x(stripe_csum_granularity_bad, 290, 0) \ @@ -302,7 +306,7 @@ enum bch_fsck_flags { x(accounting_key_replicas_devs_unsorted, 280, FSCK_AUTOFIX) \ x(accounting_key_version_0, 282, FSCK_AUTOFIX) \ x(logged_op_but_clean, 283, FSCK_AUTOFIX) \ - x(MAX, 291, 0) + x(MAX, 295, 0) enum bch_sb_error_id { #define x(t, n, ...) 
BCH_FSCK_ERR_##t = n, diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c index fb08dd680dac..116131f95815 100644 --- a/fs/bcachefs/sb-members.c +++ b/fs/bcachefs/sb-members.c @@ -163,7 +163,7 @@ static int validate_member(struct printbuf *err, return -BCH_ERR_invalid_sb_members; } - if (m.btree_bitmap_shift >= 64) { + if (m.btree_bitmap_shift >= BCH_MI_BTREE_BITMAP_SHIFT_MAX) { prt_printf(err, "device %u: invalid btree_bitmap_shift %u", i, m.btree_bitmap_shift); return -BCH_ERR_invalid_sb_members; } @@ -450,7 +450,7 @@ static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, uns m->btree_bitmap_shift += resize; } - BUG_ON(m->btree_bitmap_shift > 57); + BUG_ON(m->btree_bitmap_shift >= BCH_MI_BTREE_BITMAP_SHIFT_MAX); BUG_ON(end > 64ULL << m->btree_bitmap_shift); for (unsigned bit = start >> m->btree_bitmap_shift; diff --git a/fs/bcachefs/sb-members_format.h b/fs/bcachefs/sb-members_format.h index d727d2dfda08..2adf1221a440 100644 --- a/fs/bcachefs/sb-members_format.h +++ b/fs/bcachefs/sb-members_format.h @@ -66,6 +66,12 @@ struct bch_member { }; /* + * btree_allocated_bitmap can represent sector addresses of a u64: it itself has + * 64 elements, so 64 - ilog2(64) + */ +#define BCH_MI_BTREE_BITMAP_SHIFT_MAX 58 + +/* * This limit comes from the bucket_gens array - it's a single allocation, and * kernel allocation are limited to INT_MAX */ diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index 657fd3759e7b..a6ed9a0bf1c7 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -272,6 +272,7 @@ static void __bch2_fs_read_only(struct bch_fs *c) clean_passes++; if (bch2_btree_interior_updates_flush(c) || + bch2_btree_write_buffer_flush_going_ro(c) || bch2_journal_flush_all_pins(&c->journal) || bch2_btree_flush_all_writes(c) || seq != atomic64_read(&c->journal.seq)) { diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c index 315038a0a92d..fb5c1543e52f 100644 --- a/fs/bcachefs/tests.c +++ b/fs/bcachefs/tests.c @@ -809,6 +809,11 @@ int bch2_btree_perf_test(struct bch_fs *c, const char *testname, unsigned i; u64 time; + if (nr == 0 || nr_threads == 0) { + pr_err("nr of iterations or threads is not allowed to be 0"); + return -EINVAL; + } + atomic_set(&j.ready, nr_threads); init_waitqueue_head(&j.ready_wait); diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 115b90d29b1d..65d841d7142c 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -649,7 +649,7 @@ static bool insert_delayed_ref(struct btrfs_trans_handle *trans, &href->ref_add_list); else if (ref->action == BTRFS_DROP_DELAYED_REF) { ASSERT(!list_empty(&exist->add_list)); - list_del(&exist->add_list); + list_del_init(&exist->add_list); } else { ASSERT(0); } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index da51edbad6a0..1e4ca1e7d2e5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1618,7 +1618,7 @@ out_unlock: clear_bits |= EXTENT_CLEAR_DATA_RESV; extent_clear_unlock_delalloc(inode, start, end, locked_folio, &cached, clear_bits, page_ops); - btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL); + btrfs_qgroup_free_data(inode, NULL, start, end - start + 1, NULL); } return ret; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 926d7a9ed99d..c64d07134122 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1979,25 +1979,10 @@ error: * fsconfig(FSCONFIG_SET_FLAG, "ro"). This option is seen by the filesystem * in fc->sb_flags. * - * This disambiguation has rather positive consequences. 
Mounting a subvolume - * ro will not also turn the superblock ro. Only the mount for the subvolume - * will become ro. - * - * So, if the superblock creation request comes from the new mount API the - * caller must have explicitly done: - * - * fsconfig(FSCONFIG_SET_FLAG, "ro") - * fsmount/mount_setattr(MOUNT_ATTR_RDONLY) - * - * IOW, at some point the caller must have explicitly turned the whole - * superblock ro and we shouldn't just undo it like we did for the old mount - * API. In any case, it lets us avoid the hack in the new mount API. - * - * Consequently, the remounting hack must only be used for requests originating - * from the old mount API and should be marked for full deprecation so it can be - * turned off in a couple of years. - * - * The new mount API has no reason to support this hack. + * But, currently the util-linux mount command already utilizes the new mount + * API and is still setting fsconfig(FSCONFIG_SET_FLAG, "ro") no matter if it's + * btrfs or not, setting the whole super block RO. To make per-subvolume mounting + * work with different options work we need to keep backward compatibility. */ static struct vfsmount *btrfs_reconfigure_for_mount(struct fs_context *fc) { @@ -2019,7 +2004,7 @@ static struct vfsmount *btrfs_reconfigure_for_mount(struct fs_context *fc) if (IS_ERR(mnt)) return mnt; - if (!fc->oldapi || !ro2rw) + if (!ro2rw) return mnt; /* We need to convert to rw, call reconfigure. */ diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 22325b590e17..d6d4f2a0e898 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -903,11 +903,6 @@ __nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, goto out; } - if (may_flags & NFSD_MAY_64BIT_COOKIE) - file->f_mode |= FMODE_64BITHASH; - else - file->f_mode |= FMODE_32BITHASH; - *filp = file; out: return host_err; @@ -2174,13 +2169,15 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp, loff_t offset = *offsetp; int may_flags = NFSD_MAY_READ; - if (fhp->fh_64bit_cookies) - may_flags |= NFSD_MAY_64BIT_COOKIE; - err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file); if (err) goto out; + if (fhp->fh_64bit_cookies) + file->f_mode |= FMODE_64BITHASH; + else + file->f_mode |= FMODE_32BITHASH; + offset = vfs_llseek(file, offset, SEEK_SET); if (offset < 0) { err = nfserrno((int)offset); diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 57b4af5ad646..501ad7be5174 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -68,7 +68,6 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) goto failed; } memset(bh->b_data, 0, i_blocksize(inode)); - bh->b_bdev = inode->i_sb->s_bdev; bh->b_blocknr = blocknr; set_buffer_mapped(bh); set_buffer_uptodate(bh); @@ -133,7 +132,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, goto found; } set_buffer_mapped(bh); - bh->b_bdev = inode->i_sb->s_bdev; bh->b_blocknr = pblocknr; /* set block address for read */ bh->b_end_io = end_buffer_read_sync; get_bh(bh); diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 1c9ae36a03ab..ace22253fed0 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -83,10 +83,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, goto out; } - if (!buffer_mapped(bh)) { - bh->b_bdev = inode->i_sb->s_bdev; + if (!buffer_mapped(bh)) set_buffer_mapped(bh); - } bh->b_blocknr = pbn; bh->b_end_io = end_buffer_read_sync; get_bh(bh); diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index ceb7dc0b5bad..2db6350b5ac2 100644 --- a/fs/nilfs2/mdt.c +++ 
b/fs/nilfs2/mdt.c @@ -89,7 +89,6 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block, if (buffer_uptodate(bh)) goto failed_bh; - bh->b_bdev = sb->s_bdev; err = nilfs_mdt_insert_new_block(inode, block, bh, init_block); if (likely(!err)) { get_bh(bh); diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 10def4b55995..6dd8b854cd1f 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -39,7 +39,6 @@ static struct buffer_head *__nilfs_get_folio_block(struct folio *folio, first_block = (unsigned long)index << (PAGE_SHIFT - blkbits); bh = get_nth_bh(bh, block - first_block); - touch_buffer(bh); wait_on_buffer(bh); return bh; } @@ -64,6 +63,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode, folio_put(folio); return NULL; } + bh->b_bdev = inode->i_sb->s_bdev; return bh; } diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 3d404624bb96..c79b4291777f 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2319,6 +2319,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di, struct ocfs2_blockcheck_stats *stats) { int status = -EAGAIN; + u32 blksz_bits; if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE, strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) { @@ -2333,11 +2334,15 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di, goto out; } status = -EINVAL; - if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) { + /* Acceptable block sizes are 512 bytes, 1K, 2K and 4K. */ + blksz_bits = le32_to_cpu(di->id2.i_super.s_blocksize_bits); + if (blksz_bits < 9 || blksz_bits > 12) { mlog(ML_ERROR, "found superblock with incorrect block " - "size: found %u, should be %u\n", - 1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits), - blksz); + "size bits: found %u, should be 9, 10, 11, or 12\n", + blksz_bits); + } else if ((1 << le32_to_cpu(blksz_bits)) != blksz) { + mlog(ML_ERROR, "found superblock with incorrect block " + "size: found %u, should be %u\n", 1 << blksz_bits, blksz); } else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) != OCFS2_MAJOR_REV_LEVEL || le16_to_cpu(di->id2.i_super.s_minor_rev_level) != diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index dd0a05365e79..73a6f6fd8a8e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2036,8 +2036,7 @@ static int ocfs2_xa_remove(struct ocfs2_xa_loc *loc, rc = 0; ocfs2_xa_cleanup_value_truncate(loc, "removing", orig_clusters); - if (rc) - goto out; + goto out; } } diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index b52d85f8ad59..b4521b096058 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -457,10 +457,6 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf) #endif } -static const struct vm_operations_struct vmcore_mmap_ops = { - .fault = mmap_vmcore_fault, -}; - /** * vmcore_alloc_buf - allocate buffer in vmalloc memory * @size: size of buffer @@ -488,6 +484,11 @@ static inline char *vmcore_alloc_buf(size_t size) * virtually contiguous user-space in ELF layout. */ #ifdef CONFIG_MMU + +static const struct vm_operations_struct vmcore_mmap_ops = { + .fault = mmap_vmcore_fault, +}; + /* * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages * reported as not being ram with the zero page. 
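The ocfs2 hunk above only shifts s_blocksize_bits after range-checking it; a minimal standalone sketch of that validate-then-shift pattern (names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the check added to ocfs2_verify_volume(): accept only block
 * sizes of 512 bytes, 1K, 2K or 4K (bits 9..12), and perform the shift only
 * once the exponent is known to be sane, so a corrupted superblock field
 * cannot produce an out-of-range shift.
 */
static bool blocksize_bits_valid(uint32_t blksz_bits, uint32_t expected_blksz)
{
	if (blksz_bits < 9 || blksz_bits > 12)
		return false;			/* not 512..4096 bytes */
	return (1u << blksz_bits) == expected_blksz;
}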
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 15d94ac4095e..0ce2d704b1f3 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -1037,6 +1037,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server) */ } + put_net(cifs_net_ns(server)); kfree(server->leaf_fullpath); kfree(server); @@ -1635,8 +1636,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) /* srv_count can never go negative */ WARN_ON(server->srv_count < 0); - put_net(cifs_net_ns(server)); - list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); @@ -3070,13 +3069,22 @@ generic_ip_connect(struct TCP_Server_Info *server) if (server->ssocket) { socket = server->ssocket; } else { - rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM, + struct net *net = cifs_net_ns(server); + struct sock *sk; + + rc = __sock_create(net, sfamily, SOCK_STREAM, IPPROTO_TCP, &server->ssocket, 1); if (rc < 0) { cifs_server_dbg(VFS, "Error %d creating socket\n", rc); return rc; } + sk = server->ssocket->sk; + __netns_tracker_free(net, &sk->ns_tracker, false); + sk->sk_net_refcnt = 1; + get_net_track(net, &sk->ns_tracker, GFP_KERNEL); + sock_inuse_add(net, 1); + /* BB other socket options to set KEEPALIVE, NODELAY? */ cifs_dbg(FYI, "Socket created\n"); socket = server->ssocket; diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c index aa2a37a7ce84..e6a72f75ab94 100644 --- a/fs/smb/server/connection.c +++ b/fs/smb/server/connection.c @@ -70,6 +70,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void) atomic_set(&conn->req_running, 0); atomic_set(&conn->r_count, 0); atomic_set(&conn->refcnt, 1); + atomic_set(&conn->mux_smb_requests, 0); conn->total_credits = 1; conn->outstanding_credits = 0; diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h index b379ae4fdcdf..8ddd5a3c7baf 100644 --- a/fs/smb/server/connection.h +++ b/fs/smb/server/connection.h @@ -107,6 +107,7 @@ struct ksmbd_conn { __le16 signing_algorithm; bool binding; atomic_t refcnt; + atomic_t mux_smb_requests; }; struct ksmbd_conn_ops { diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c index 1e4624e9d434..ad02fe555fda 100644 --- a/fs/smb/server/mgmt/user_session.c +++ b/fs/smb/server/mgmt/user_session.c @@ -90,7 +90,7 @@ static int __rpc_method(char *rpc_name) int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name) { - struct ksmbd_session_rpc *entry; + struct ksmbd_session_rpc *entry, *old; struct ksmbd_rpc_command *resp; int method; @@ -106,16 +106,19 @@ int ksmbd_session_rpc_open(struct ksmbd_session *sess, char *rpc_name) entry->id = ksmbd_ipc_id_alloc(); if (entry->id < 0) goto free_entry; - xa_store(&sess->rpc_handle_list, entry->id, entry, GFP_KERNEL); + old = xa_store(&sess->rpc_handle_list, entry->id, entry, GFP_KERNEL); + if (xa_is_err(old)) + goto free_id; resp = ksmbd_rpc_open(sess, entry->id); if (!resp) - goto free_id; + goto erase_xa; kvfree(resp); return entry->id; -free_id: +erase_xa: xa_erase(&sess->rpc_handle_list, entry->id); +free_id: ksmbd_rpc_id_free(entry->id); free_entry: kfree(entry); @@ -175,6 +178,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn) unsigned long id; struct ksmbd_session *sess; + down_write(&sessions_table_lock); down_write(&conn->session_lock); xa_for_each(&conn->sessions, id, sess) { if (atomic_read(&sess->refcnt) == 0 && @@ -188,6 +192,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn) } } up_write(&conn->session_lock); + up_write(&sessions_table_lock); } int 
ksmbd_session_register(struct ksmbd_conn *conn, @@ -229,7 +234,6 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) } } } - up_write(&sessions_table_lock); down_write(&conn->session_lock); xa_for_each(&conn->sessions, id, sess) { @@ -249,6 +253,7 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) } } up_write(&conn->session_lock); + up_write(&sessions_table_lock); } struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c index 9670c97f14b3..e6cfedba9992 100644 --- a/fs/smb/server/server.c +++ b/fs/smb/server/server.c @@ -238,11 +238,11 @@ static void __handle_ksmbd_work(struct ksmbd_work *work, } while (is_chained == true); send: - if (work->sess) - ksmbd_user_session_put(work->sess); if (work->tcon) ksmbd_tree_connect_put(work->tcon); smb3_preauth_hash_rsp(work); + if (work->sess) + ksmbd_user_session_put(work->sess); if (work->sess && work->sess->enc && work->encrypted && conn->ops->encrypt_resp) { rc = conn->ops->encrypt_resp(work); @@ -270,6 +270,7 @@ static void handle_ksmbd_work(struct work_struct *wk) ksmbd_conn_try_dequeue_request(work); ksmbd_free_work_struct(work); + atomic_dec(&conn->mux_smb_requests); /* * Checking waitqueue to dropping pending requests on * disconnection. waitqueue_active is safe because it @@ -291,6 +292,15 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn) struct ksmbd_work *work; int err; + err = ksmbd_init_smb_server(conn); + if (err) + return 0; + + if (atomic_inc_return(&conn->mux_smb_requests) >= conn->vals->max_credits) { + atomic_dec_return(&conn->mux_smb_requests); + return -ENOSPC; + } + work = ksmbd_alloc_work_struct(); if (!work) { pr_err("allocation for work failed\n"); @@ -301,12 +311,6 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn) work->request_buf = conn->request_buf; conn->request_buf = NULL; - err = ksmbd_init_smb_server(work); - if (err) { - ksmbd_free_work_struct(work); - return 0; - } - ksmbd_conn_enqueue_request(work); atomic_inc(&conn->r_count); /* update activity on connection */ diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c index a2ebbe604c8c..75b4eb856d32 100644 --- a/fs/smb/server/smb_common.c +++ b/fs/smb/server/smb_common.c @@ -388,6 +388,10 @@ static struct smb_version_ops smb1_server_ops = { .set_rsp_status = set_smb1_rsp_status, }; +static struct smb_version_values smb1_server_values = { + .max_credits = SMB2_MAX_CREDITS, +}; + static int smb1_negotiate(struct ksmbd_work *work) { return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE); @@ -399,18 +403,18 @@ static struct smb_version_cmds smb1_server_cmds[1] = { static int init_smb1_server(struct ksmbd_conn *conn) { + conn->vals = &smb1_server_values; conn->ops = &smb1_server_ops; conn->cmds = smb1_server_cmds; conn->max_cmds = ARRAY_SIZE(smb1_server_cmds); return 0; } -int ksmbd_init_smb_server(struct ksmbd_work *work) +int ksmbd_init_smb_server(struct ksmbd_conn *conn) { - struct ksmbd_conn *conn = work->conn; __le32 proto; - proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol; + proto = *(__le32 *)((struct smb_hdr *)conn->request_buf)->Protocol; if (conn->need_neg == false) { if (proto == SMB1_PROTO_NUMBER) return -EINVAL; diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h index cc1d6dfe29d5..a3d8a905b07e 100644 --- a/fs/smb/server/smb_common.h +++ b/fs/smb/server/smb_common.h @@ -427,7 +427,7 @@ bool ksmbd_smb_request(struct ksmbd_conn *conn); int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 
dialects_count); -int ksmbd_init_smb_server(struct ksmbd_work *work); +int ksmbd_init_smb_server(struct ksmbd_conn *conn); struct ksmbd_kstat; int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, diff --git a/include/acpi/processor.h b/include/acpi/processor.h index e6f6074eadbf..a17e97e634a6 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -465,4 +465,6 @@ extern int acpi_processor_ffh_lpi_probe(unsigned int cpu); extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi); #endif +void acpi_processor_init_invariance_cppc(void); + #endif diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index b721f360d759..4a952c4885ed 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -11,10 +11,6 @@ void topology_normalize_cpu_scale(void); int topology_update_cpu_topology(void); -#ifdef CONFIG_ACPI_CPPC_LIB -void topology_init_cpu_capacity_cppc(void); -#endif - struct device_node; bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index f59099a213d0..67f6fdf2e7cd 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -315,8 +315,6 @@ u32 arm_smccc_get_version(void); void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit); -extern u64 smccc_has_sve_hint; - /** * arm_smccc_get_soc_id_version() * @@ -415,15 +413,6 @@ struct arm_smccc_quirk { }; /** - * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls - * - * Sets the SMCCC hint bit to indicate if there is live state in the SVE - * registers, this modifies x0 in place and should never be called from C - * code. - */ -asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0); - -/** * __arm_smccc_smc() - make SMC calls * @a0-a7: arguments passed in registers 0 to 7 * @res: result values from registers 0 to 3 @@ -490,20 +479,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #endif -/* nVHE hypervisor doesn't have a current thread so needs separate checks */ -#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__) - -#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \ - ARM64_SVE) -#define smccc_sve_clobbers "x16", "x30", "cc", - -#else - -#define SMCCC_SVE_CHECK -#define smccc_sve_clobbers - -#endif - #define __constraint_read_2 "r" (arg0) #define __constraint_read_3 __constraint_read_2, "r" (arg1) #define __constraint_read_4 __constraint_read_3, "r" (arg2) @@ -574,12 +549,11 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, register unsigned long r3 asm("r3"); \ CONCATENATE(__declare_arg_, \ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \ - asm volatile(SMCCC_SVE_CHECK \ - inst "\n" : \ + asm volatile(inst "\n" : \ "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \ : CONCATENATE(__constraint_read_, \ COUNT_ARGS(__VA_ARGS__)) \ - : smccc_sve_clobbers "memory"); \ + : "memory"); \ if (___res) \ *___res = (typeof(*___res)){r0, r1, r2, r3}; \ } while (0) @@ -628,7 +602,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, asm ("" : \ : CONCATENATE(__constraint_read_, \ COUNT_ARGS(__VA_ARGS__)) \ - : smccc_sve_clobbers "memory"); \ + : "memory"); \ if (___res) \ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \ } while (0) diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h deleted file mode 100644 index 76860a461ed2..000000000000 --- a/include/linux/ath9k_platform.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * 
Copyright (c) 2008 Atheros Communications Inc. - * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> - * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef _LINUX_ATH9K_PLATFORM_H -#define _LINUX_ATH9K_PLATFORM_H - -#define ATH9K_PLAT_EEP_MAX_WORDS 2048 - -struct ath9k_platform_data { - const char *eeprom_name; - - u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS]; - u8 *macaddr; - - int led_pin; - u32 gpio_mask; - u32 gpio_val; - - u32 bt_active_pin; - u32 bt_priority_pin; - u32 wlan_active_pin; - - bool endian_check; - bool is_clk_25mhz; - bool tx_gain_buffalo; - bool disable_2ghz; - bool disable_5ghz; - bool led_active_high; - - int (*get_mac_revision)(void); - int (*external_reset)(void); - - bool use_eeprom; -}; - -#endif /* _LINUX_ATH9K_PLATFORM_H */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 1199e308c8dd..299280c94d07 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -734,6 +734,9 @@ struct kernel_ethtool_ts_info { * @rxfh_per_ctx_key: device supports setting different RSS key for each * additional context. Netlink API should report hfunc, key, and input_xfrm * for every context, not just context 0. + * @cap_rss_rxnfc_adds: device supports nonzero ring_cookie in filters with + * %FLOW_RSS flag; the queue ID from the filter is added to the value from + * the indirection table to determine the delivery queue. * @rxfh_indir_space: max size of RSS indirection tables, if indirection table * size as returned by @get_rxfh_indir_size may change during lifetime * of the device. Leave as 0 if the table size is constant. 
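The new cap_rss_rxnfc_adds flag documented above describes additive queue selection; a rough illustrative sketch of that computation follows (helper and parameter names are made up here, not driver API):

#include <stdint.h>

/*
 * Illustrative only: with FLOW_RSS and a nonzero ring_cookie, the delivery
 * queue is the RSS indirection-table entry selected by the flow hash plus
 * the queue offset carried in the filter's ring_cookie.
 */
static unsigned int pick_rx_queue(const unsigned int *indir_table,
				  unsigned int indir_size,
				  uint32_t flow_hash,
				  unsigned int ring_cookie)
{
	unsigned int base = indir_table[flow_hash % indir_size];

	return base + ring_cookie;
}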
@@ -956,6 +959,7 @@ struct ethtool_ops { u32 cap_rss_ctx_supported:1; u32 cap_rss_sym_xor_supported:1; u32 rxfh_per_ctx_key:1; + u32 cap_rss_rxnfc_adds:1; u32 rxfh_indir_space; u16 rxfh_key_space; u16 rxfh_priv_size; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 456bca45ff05..05dedc45505c 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1445,6 +1445,8 @@ struct ieee80211_mgmt { __le16 status; __le16 capab; __le16 timeout; + /* followed by BA Extension */ + u8 variable[]; } __packed addba_resp; struct{ u8 action_code; diff --git a/include/linux/mdio.h b/include/linux/mdio.h index efeca5bd7600..3c3deac57894 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -165,31 +165,12 @@ extern int mdio_set_flag(const struct mdio_if_info *mdio, bool sense); extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds); extern int mdio45_nway_restart(const struct mdio_if_info *mdio); -extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, - struct ethtool_cmd *ecmd, - u32 npage_adv, u32 npage_lpa); extern void mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, struct ethtool_link_ksettings *cmd, u32 npage_adv, u32 npage_lpa); /** - * mdio45_ethtool_gset - get settings for ETHTOOL_GSET - * @mdio: MDIO interface - * @ecmd: Ethtool request structure - * - * Since the CSRs for auto-negotiation using next pages are not fully - * standardised, this function does not attempt to decode them. Use - * mdio45_ethtool_gset_npage() to specify advertisement bits from next - * pages. - */ -static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio, - struct ethtool_cmd *ecmd) -{ - mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); -} - -/** * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS * @mdio: MDIO interface * @cmd: Ethtool request structure diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 34d2da05f2f1..e1b41554a5fb 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1760,8 +1760,9 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg) struct mem_cgroup *mem_cgroup_from_slab_obj(void *p); -static inline void count_objcg_event(struct obj_cgroup *objcg, - enum vm_event_item idx) +static inline void count_objcg_events(struct obj_cgroup *objcg, + enum vm_event_item idx, + unsigned long count) { struct mem_cgroup *memcg; @@ -1770,7 +1771,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg, rcu_read_lock(); memcg = obj_cgroup_memcg(objcg); - count_memcg_events(memcg, idx, 1); + count_memcg_events(memcg, idx, count); rcu_read_unlock(); } @@ -1825,8 +1826,9 @@ static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p) return NULL; } -static inline void count_objcg_event(struct obj_cgroup *objcg, - enum vm_event_item idx) +static inline void count_objcg_events(struct obj_cgroup *objcg, + enum vm_event_item idx, + unsigned long count) { } diff --git a/include/linux/mman.h b/include/linux/mman.h index bcb201ab7a41..a842783ffa62 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -2,6 +2,7 @@ #ifndef _LINUX_MMAN_H #define _LINUX_MMAN_H +#include <linux/fs.h> #include <linux/mm.h> #include <linux/percpu_counter.h> @@ -94,7 +95,7 @@ static inline void vm_unacct_memory(long pages) #endif #ifndef arch_calc_vm_flag_bits -#define arch_calc_vm_flag_bits(flags) 0 +#define arch_calc_vm_flag_bits(file, flags) 0 #endif #ifndef arch_validate_prot @@ -151,13 +152,13 @@ calc_vm_prot_bits(unsigned long prot, unsigned 
long pkey) * Combine the mmap "flags" argument into "vm_flags" used internally. */ static inline unsigned long -calc_vm_flag_bits(unsigned long flags) +calc_vm_flag_bits(struct file *file, unsigned long flags) { return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) | - arch_calc_vm_flag_bits(flags); + arch_calc_vm_flag_bits(file, flags); } unsigned long vm_commit_limit(void); @@ -188,16 +189,31 @@ static inline bool arch_memory_deny_write_exec_supported(void) * * d) mmap(PROT_READ | PROT_EXEC) * mmap(PROT_READ | PROT_EXEC | PROT_BTI) + * + * This is only applicable if the user has set the Memory-Deny-Write-Execute + * (MDWE) protection mask for the current process. + * + * @old specifies the VMA flags the VMA originally possessed, and @new the ones + * we propose to set. + * + * Return: false if proposed change is OK, true if not ok and should be denied. */ -static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags) +static inline bool map_deny_write_exec(unsigned long old, unsigned long new) { + /* If MDWE is disabled, we have nothing to deny. */ if (!test_bit(MMF_HAS_MDWE, ¤t->mm->flags)) return false; - if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE)) + /* If the new VMA is not executable, we have nothing to deny. */ + if (!(new & VM_EXEC)) + return false; + + /* Under MDWE we do not accept newly writably executable VMAs... */ + if (new & VM_WRITE) return true; - if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC)) + /* ...nor previously non-executable VMAs becoming executable. */ + if (!(old & VM_EXEC)) return true; return false; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5b1c984daf45..80bc5640bb60 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -823,6 +823,7 @@ struct zone { unsigned long watermark_boost; unsigned long nr_reserved_highatomic; + unsigned long nr_free_highatomic; /* * We don't know if the memory that we're going to allocate will be diff --git a/include/linux/phy.h b/include/linux/phy.h index 1e4127c495c0..b8346db42727 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -721,16 +721,15 @@ struct phy_device { /* used for eee validation and configuration*/ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee); + /* Energy efficient ethernet modes which should be prohibited */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_broken_modes); bool eee_enabled; + bool enable_tx_lpi; + struct eee_config eee_cfg; /* Host supported PHY interface types. Should be ignored if empty. */ DECLARE_PHY_INTERFACE_MASK(host_interfaces); - /* Energy efficient ethernet modes which should be prohibited */ - u32 eee_broken_modes; - bool enable_tx_lpi; - struct eee_config eee_cfg; - #ifdef CONFIG_LED_TRIGGER_PHY struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; @@ -1265,6 +1264,16 @@ void of_set_phy_timing_role(struct phy_device *phydev); int phy_speed_down_core(struct phy_device *phydev); /** + * phy_set_eee_broken - Mark an EEE mode as broken so that it isn't advertised. 
+ * @phydev: The phy_device struct + * @link_mode: The broken EEE mode + */ +static inline void phy_set_eee_broken(struct phy_device *phydev, u32 link_mode) +{ + linkmode_set_bit(link_mode, phydev->eee_broken_modes); +} + +/** * phy_is_started - Convenience function to check whether PHY is started * @phydev: The phy_device struct */ diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h index 2ee1a679e592..0e0e8fe6975f 100644 --- a/include/linux/platform_data/microchip-ksz.h +++ b/include/linux/platform_data/microchip-ksz.h @@ -42,6 +42,7 @@ enum ksz_chip_id { LAN9372_CHIP_ID = 0x00937200, LAN9373_CHIP_ID = 0x00937300, LAN9374_CHIP_ID = 0x00937400, + LAN9646_CHIP_ID = 0x00964600, }; struct ksz_platform_data { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 60535c706851..58009fa66102 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2682,6 +2682,12 @@ static inline void skb_assert_len(struct sk_buff *skb) #endif /* CONFIG_DEBUG_NET */ } +#if defined(CONFIG_FAIL_SKB_REALLOC) +void skb_might_realloc(struct sk_buff *skb); +#else +static inline void skb_might_realloc(struct sk_buff *skb) {} +#endif + /* * Add data to an sk_buff */ @@ -2782,6 +2788,7 @@ static inline enum skb_drop_reason pskb_may_pull_reason(struct sk_buff *skb, unsigned int len) { DEBUG_NET_WARN_ON_ONCE(len > INT_MAX); + skb_might_realloc(skb); if (likely(len <= skb_headlen(skb))) return SKB_NOT_DROPPED_YET; @@ -3240,6 +3247,7 @@ static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) static inline int pskb_trim(struct sk_buff *skb, unsigned int len) { + skb_might_realloc(skb); return (len < skb->len) ? __pskb_trim(skb, len) : 0; } @@ -3994,6 +4002,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) { + skb_might_realloc(skb); if (likely(len >= skb->len)) return 0; return pskb_trim_rcsum_slow(skb, len); diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index fc5a206c4043..195debe2b1db 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -77,7 +77,9 @@ static inline int copy_safe_from_sockptr(void *dst, size_t ksize, { if (optlen < ksize) return -EINVAL; - return copy_from_sockptr(dst, optval, ksize); + if (copy_from_sockptr(dst, optval, ksize)) + return -EFAULT; + return 0; } static inline int copy_struct_from_sockptr(void *dst, size_t ksize, diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 587b96b4418e..20a40ade8030 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -421,6 +421,7 @@ void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value); u8 tpm_buf_read_u8(struct tpm_buf *buf, off_t *offset); u16 tpm_buf_read_u16(struct tpm_buf *buf, off_t *offset); u32 tpm_buf_read_u32(struct tpm_buf *buf, off_t *offset); +void tpm_buf_append_handle(struct tpm_chip *chip, struct tpm_buf *buf, u32 handle); /* * Check if TPM device is in the firmware upgrade mode. 
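With eee_broken_modes now a link-mode mask, the new phy_set_eee_broken() helper takes the ethtool link-mode bit directly; a hypothetical driver quirk (a sketch, not an existing driver) might use it like this:

#include <linux/ethtool.h>
#include <linux/phy.h>

/*
 * Hypothetical quirk: stop advertising EEE for 1000BASE-T on a PHY whose
 * EEE implementation is unreliable at that speed. Only the helper call is
 * taken from the patch above; the probe function itself is made up.
 */
static int example_phy_probe(struct phy_device *phydev)
{
	phy_set_eee_broken(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
	return 0;
}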
@@ -505,6 +506,8 @@ void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf, void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf, u8 attributes, u8 *passphrase, int passphraselen); +void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf, + u8 attributes, u8 *passphrase, int passphraselen); static inline void tpm_buf_append_hmac_session_opt(struct tpm_chip *chip, struct tpm_buf *buf, u8 attributes, diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 3625096d5f85..7183e5aca282 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -141,7 +141,8 @@ static inline long get_rlimit_value(struct ucounts *ucounts, enum rlimit_type ty long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v); bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v); -long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type); +long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type, + bool override_rlimit); void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type); bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long max); diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index aed952d04132..f70d0958095c 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -134,6 +134,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_SWAP SWAP_RA, SWAP_RA_HIT, + SWPIN_ZERO, + SWPOUT_ZERO, #ifdef CONFIG_KSM KSM_SWPIN_COPY, #endif diff --git a/include/net/bond_options.h b/include/net/bond_options.h index 473a0147769e..18687ccf0638 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -161,5 +161,7 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond); #if IS_ENABLED(CONFIG_IPV6) void bond_option_ns_ip6_targets_clear(struct bonding *bond); #endif +void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave); +void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave); #endif /* _NET_BOND_OPTIONS_H */ diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index d59bb96c5a02..6c5a1ea209a2 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -76,6 +76,10 @@ FN(INVALID_PROTO) \ FN(IP_INADDRERRORS) \ FN(IP_INNOROUTES) \ + FN(IP_LOCAL_SOURCE) \ + FN(IP_INVALID_SOURCE) \ + FN(IP_LOCALNET) \ + FN(IP_INVALID_DEST) \ FN(PKT_TOO_BIG) \ FN(DUP_FRAG) \ FN(FRAG_REASM_TIMEOUT) \ @@ -100,6 +104,7 @@ FN(IP_TUNNEL_ECN) \ FN(TUNNEL_TXINFO) \ FN(LOCAL_MAC) \ + FN(ARP_PVLAN_DISABLE) \ FNe(MAX) /** @@ -373,6 +378,21 @@ enum skb_drop_reason { * IPSTATS_MIB_INADDRERRORS */ SKB_DROP_REASON_IP_INNOROUTES, + /** @SKB_DROP_REASON_IP_LOCAL_SOURCE: the source ip is local */ + SKB_DROP_REASON_IP_LOCAL_SOURCE, + /** + * @SKB_DROP_REASON_IP_INVALID_SOURCE: the source ip is invalid: + * 1) source ip is multicast or limited broadcast + * 2) source ip is zero and not IGMP + */ + SKB_DROP_REASON_IP_INVALID_SOURCE, + /** @SKB_DROP_REASON_IP_LOCALNET: source or dest ip is local net */ + SKB_DROP_REASON_IP_LOCALNET, + /** + * @SKB_DROP_REASON_IP_INVALID_DEST: the dest ip is invalid: + * 1) dest ip is 0 + */ + SKB_DROP_REASON_IP_INVALID_DEST, /** * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the * MTU) @@ -459,6 +479,12 @@ enum skb_drop_reason { */ SKB_DROP_REASON_LOCAL_MAC, /** + * @SKB_DROP_REASON_ARP_PVLAN_DISABLE: packet which is 
not IP is + * forwarded to the in_dev, and the proxy_arp_pvlan is not + * enabled. + */ + SKB_DROP_REASON_ARP_PVLAN_DISABLE, + /** * @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which * shouldn't be used as a real 'reason' - only for tracing code gen */ diff --git a/include/net/eee.h b/include/net/eee.h index 84837aba3cd9..cfab1b8bc46a 100644 --- a/include/net/eee.h +++ b/include/net/eee.h @@ -13,10 +13,7 @@ struct eee_config { static inline bool eeecfg_mac_can_tx_lpi(const struct eee_config *eeecfg) { /* eee_enabled is the master on/off */ - if (!eeecfg->eee_enabled || !eeecfg->tx_lpi_enabled) - return false; - - return true; + return eeecfg->eee_enabled && eeecfg->tx_lpi_enabled; } static inline void eeecfg_to_eee(struct ethtool_keee *eee, diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index b6e44f4eaa4c..a113c11ab56b 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -452,6 +452,18 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, dscp_t dscp, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); +static inline enum skb_drop_reason +fib_validate_source_reason(struct sk_buff *skb, __be32 src, __be32 dst, + dscp_t dscp, int oif, struct net_device *dev, + struct in_device *idev, u32 *itag) +{ + int err = fib_validate_source(skb, src, dst, dscp, oif, dev, idev, + itag); + if (err < 0) + return -err; + return SKB_NOT_DROPPED_YET; +} + #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b4f246cdcca4..a97c9f85ae9a 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -7672,12 +7672,12 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb) * * - change_vif_links(0x11) * - unassign_vif_chanctx(link_id=0) + * - assign_vif_chanctx(link_id=4) * - change_sta_links(0x11) for each affected STA (the AP) * (TDLS connections on now inactive links should be torn down) * - remove group keys on the old link (link_id 0) * - add new group keys (GTK/IGTK/BIGTK) on the new link (link_id 4) * - change_sta_links(0x10) for each affected STA (the AP) - * - assign_vif_chanctx(link_id=4) * - change_vif_links(0x10) * * Return: 0 on success. An error code otherwise. 
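The fib_validate_source_reason() wrapper above leans on the drop-reason convention used throughout this series: SKB_NOT_DROPPED_YET is 0, so a reason still tests false on success, and the wrapper assumes fib_validate_source() itself is converted elsewhere in the series to return either a non-negative value or a negated drop reason, which negating recovers. A minimal stand-alone sketch of that mapping; the enum subset and validate_source()/validate_source_reason() names are illustrative, not the real dropreason-core.h layout.

#include <stdio.h>

/* Illustrative subset; only the "0 means not dropped" property matters. */
enum skb_drop_reason {
	SKB_NOT_DROPPED_YET = 0,
	SKB_DROP_REASON_IP_LOCAL_SOURCE,
	SKB_DROP_REASON_IP_INVALID_SOURCE,
	SKB_DROP_REASON_IP_LOCALNET,
};

/* Models a converted helper: >= 0 on success, -reason on failure. */
static int validate_source(int bogus_source)
{
	if (bogus_source)
		return -SKB_DROP_REASON_IP_INVALID_SOURCE;
	return 0;
}

/* Same shape as fib_validate_source_reason() in the hunk above. */
static enum skb_drop_reason validate_source_reason(int bogus_source)
{
	int err = validate_source(bogus_source);

	if (err < 0)
		return -err;
	return SKB_NOT_DROPPED_YET;
}

int main(void)
{
	enum skb_drop_reason reason = validate_source_reason(1);

	if (reason)	/* non-zero means "drop, and here is why" */
		printf("drop, reason=%d\n", reason);

	reason = validate_source_reason(0);
	printf("ok, reason=%d (SKB_NOT_DROPPED_YET)\n", reason);
	return 0;
}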
diff --git a/include/net/route.h b/include/net/route.h index 0a690adfdff5..84cb1e04f5cd 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -199,30 +199,34 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 return ip_route_output_key(net, fl4); } -int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, - struct in_device *in_dev, u32 *itag); -int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev); -int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, +enum skb_drop_reason +ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, dscp_t dscp, struct net_device *dev, - const struct sk_buff *hint); - -static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, - dscp_t dscp, struct net_device *devin) + struct in_device *in_dev, u32 *itag); +enum skb_drop_reason +ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev); +enum skb_drop_reason +ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + const struct sk_buff *hint); + +static inline enum skb_drop_reason +ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, dscp_t dscp, + struct net_device *devin) { - int err; + enum skb_drop_reason reason; rcu_read_lock(); - err = ip_route_input_noref(skb, dst, src, dscp, devin); - if (!err) { + reason = ip_route_input_noref(skb, dst, src, dscp, devin); + if (!reason) { skb_dst_force(skb); if (!skb_dst(skb)) - err = -EINVAL; + reason = SKB_DROP_REASON_NOT_SPECIFIED; } rcu_read_unlock(); - return err; + return reason; } void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif, diff --git a/include/net/tls.h b/include/net/tls.h index 3a33924db2bc..61fef2880114 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -390,8 +390,12 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx) static inline bool tls_sw_has_ctx_tx(const struct sock *sk) { - struct tls_context *ctx = tls_get_ctx(sk); + struct tls_context *ctx; + + if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk)) + return false; + ctx = tls_get_ctx(sk); if (!ctx) return false; return !!tls_sw_ctx_tx(ctx); @@ -399,8 +403,12 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk) static inline bool tls_sw_has_ctx_rx(const struct sock *sk) { - struct tls_context *ctx = tls_get_ctx(sk); + struct tls_context *ctx; + + if (!sk_is_inet(sk) || !inet_test_bit(IS_ICSK, sk)) + return false; + ctx = tls_get_ctx(sk); if (!ctx) return false; return !!tls_sw_ctx_rx(ctx); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 719e0ed1e976..a1c353a62c56 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5920,12 +5920,15 @@ static void prev_balance(struct rq *rq, struct task_struct *prev, #ifdef CONFIG_SCHED_CLASS_EXT /* - * SCX requires a balance() call before every pick_next_task() including - * when waking up from SCHED_IDLE. If @start_class is below SCX, start - * from SCX instead. + * SCX requires a balance() call before every pick_task() including when + * waking up from SCHED_IDLE. If @start_class is below SCX, start from + * SCX instead. Also, set a flag to detect missing balance() call. 
*/ - if (scx_enabled() && sched_class_above(&ext_sched_class, start_class)) - start_class = &ext_sched_class; + if (scx_enabled()) { + rq->scx.flags |= SCX_RQ_BAL_PENDING; + if (sched_class_above(&ext_sched_class, start_class)) + start_class = &ext_sched_class; + } #endif /* diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index b5f4b1a5ae98..51b7e04879d7 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -2634,7 +2634,7 @@ static int balance_one(struct rq *rq, struct task_struct *prev) lockdep_assert_rq_held(rq); rq->scx.flags |= SCX_RQ_IN_BALANCE; - rq->scx.flags &= ~SCX_RQ_BAL_KEEP; + rq->scx.flags &= ~(SCX_RQ_BAL_PENDING | SCX_RQ_BAL_KEEP); if (static_branch_unlikely(&scx_ops_cpu_preempt) && unlikely(rq->scx.cpu_released)) { @@ -2948,12 +2948,11 @@ static struct task_struct *pick_task_scx(struct rq *rq) { struct task_struct *prev = rq->curr; struct task_struct *p; + bool prev_on_scx = prev->sched_class == &ext_sched_class; + bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP; + bool kick_idle = false; /* - * If balance_scx() is telling us to keep running @prev, replenish slice - * if necessary and keep running @prev. Otherwise, pop the first one - * from the local DSQ. - * * WORKAROUND: * * %SCX_RQ_BAL_KEEP should be set iff $prev is on SCX as it must just @@ -2962,22 +2961,41 @@ static struct task_struct *pick_task_scx(struct rq *rq) * which then ends up calling pick_task_scx() without preceding * balance_scx(). * - * For now, ignore cases where $prev is not on SCX. This isn't great and - * can theoretically lead to stalls. However, for switch_all cases, this - * happens only while a BPF scheduler is being loaded or unloaded, and, - * for partial cases, fair will likely keep triggering this CPU. + * Keep running @prev if possible and avoid stalling from entering idle + * without balancing. * - * Once fair is fixed, restore WARN_ON_ONCE(). + * Once fair is fixed, remove the workaround and trigger WARN_ON_ONCE() + * if pick_task_scx() is called without preceding balance_scx(). */ - if ((rq->scx.flags & SCX_RQ_BAL_KEEP) && - prev->sched_class == &ext_sched_class) { + if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) { + if (prev_on_scx) { + keep_prev = true; + } else { + keep_prev = false; + kick_idle = true; + } + } else if (unlikely(keep_prev && !prev_on_scx)) { + /* only allowed during transitions */ + WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED); + keep_prev = false; + } + + /* + * If balance_scx() is telling us to keep running @prev, replenish slice + * if necessary and keep running @prev. Otherwise, pop the first one + * from the local DSQ. 
+ */ + if (keep_prev) { p = prev; if (!p->scx.slice) p->scx.slice = SCX_SLICE_DFL; } else { p = first_local_task(rq); - if (!p) + if (!p) { + if (kick_idle) + scx_bpf_kick_cpu(cpu_of(rq), SCX_KICK_IDLE); return NULL; + } if (unlikely(!p->scx.slice)) { if (!scx_rq_bypassing(rq) && !scx_warned_zero_slice) { @@ -4979,7 +4997,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link) if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN), cpu_possible_mask)) { - pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation"); + pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n"); return -EINVAL; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6c54a57275cc..c03b3d7b320e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -751,8 +751,9 @@ enum scx_rq_flags { */ SCX_RQ_ONLINE = 1 << 0, SCX_RQ_CAN_STOP_TICK = 1 << 1, - SCX_RQ_BAL_KEEP = 1 << 2, /* balance decided to keep current */ - SCX_RQ_BYPASSING = 1 << 3, + SCX_RQ_BAL_PENDING = 1 << 2, /* balance hasn't run yet */ + SCX_RQ_BAL_KEEP = 1 << 3, /* balance decided to keep current */ + SCX_RQ_BYPASSING = 1 << 4, SCX_RQ_IN_WAKEUP = 1 << 16, SCX_RQ_IN_BALANCE = 1 << 17, diff --git a/kernel/signal.c b/kernel/signal.c index 4344860ffcac..cbabb2d05e0a 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -419,7 +419,8 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags, */ rcu_read_lock(); ucounts = task_ucounts(t); - sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING); + sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, + override_rlimit); rcu_read_unlock(); if (!sigpending) return NULL; diff --git a/kernel/ucount.c b/kernel/ucount.c index 8c07714ff27d..696406939be5 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -307,7 +307,8 @@ void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type) do_dec_rlimit_put_ucounts(ucounts, NULL, type); } -long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type) +long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type, + bool override_rlimit) { /* Caller must hold a reference to ucounts */ struct ucounts *iter; @@ -317,10 +318,11 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type) for (iter = ucounts; iter; iter = iter->ns->ucounts) { long new = atomic_long_add_return(1, &iter->rlimit[type]); if (new < 0 || new > max) - goto unwind; + goto dec_unwind; if (iter == ucounts) ret = new; - max = get_userns_rlimit_max(iter->ns, type); + if (!override_rlimit) + max = get_userns_rlimit_max(iter->ns, type); /* * Grab an extra ucount reference for the caller when * the rlimit count was previously 0. @@ -334,7 +336,6 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type) dec_unwind: dec = atomic_long_sub_return(1, &iter->rlimit[type]); WARN_ON_ONCE(dec < 0); -unwind: do_dec_rlimit_put_ucounts(ucounts, iter, type); return 0; } diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 7312ae7c3cc5..67b669d2e70e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2115,6 +2115,16 @@ config FAIL_SUNRPC Provide fault-injection capability for SunRPC and its consumers. +config FAIL_SKB_REALLOC + bool "Fault-injection capability forcing skb to reallocate" + depends on FAULT_INJECTION_DEBUG_FS + help + Provide fault-injection capability that forces the skb to be + reallocated, catching possible invalid pointers to the skb. 
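Distilling the pick_task_scx() rework a few hunks above: with the new SCX_RQ_BAL_PENDING flag (set in prev_balance()), the pick path no longer trusts SCX_RQ_BAL_KEEP when balance_scx() has not run yet; it keeps @prev only if @prev is an SCX task, and otherwise kicks the CPU so it does not stall in idle without a balance pass. A stand-alone boolean sketch of that decision; decide_keep_prev() is a made-up name for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/*
 * Models the workaround in pick_task_scx(): BAL_PENDING means balance_scx()
 * has not run since prev_balance(), so the BAL_KEEP flag may be stale.
 */
static void decide_keep_prev(bool bal_pending, bool bal_keep, bool prev_on_scx,
			     bool *keep_prev, bool *kick_idle)
{
	*keep_prev = bal_keep;
	*kick_idle = false;

	if (bal_pending) {
		/* No balance() yet: keep @prev only if it is an SCX task. */
		*keep_prev = prev_on_scx;
		/* Otherwise kick the CPU out of idle so balance runs soon. */
		*kick_idle = !prev_on_scx;
	} else if (*keep_prev && !prev_on_scx) {
		/* BAL_KEEP for a non-SCX @prev is only expected in transitions. */
		*keep_prev = false;
	}
}

int main(void)
{
	bool keep, kick;

	decide_keep_prev(true, false, false, &keep, &kick);
	printf("pending, prev not on SCX -> keep=%d kick_idle=%d\n", keep, kick);

	decide_keep_prev(false, true, true, &keep, &kick);
	printf("balanced, keep requested -> keep=%d kick_idle=%d\n", keep, kick);
	return 0;
}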
+ + For more information, check + Documentation/dev-tools/fault-injection/fault-injection.rst + config FAULT_INJECTION_CONFIGFS bool "Configfs interface for fault-injection capabilities" depends on FAULT_INJECTION diff --git a/lib/objpool.c b/lib/objpool.c index fd108fe0d095..b998b720c732 100644 --- a/lib/objpool.c +++ b/lib/objpool.c @@ -74,15 +74,21 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs, * warm caches and TLB hits. in default vmalloc is used to * reduce the pressure of kernel slab system. as we know, * mimimal size of vmalloc is one page since vmalloc would - * always align the requested size to page size + * always align the requested size to page size. + * but if vmalloc fails or it is not available (e.g. GFP_ATOMIC) + * allocate percpu slot with kmalloc. */ - if ((pool->gfp & GFP_ATOMIC) == GFP_ATOMIC) - slot = kmalloc_node(size, pool->gfp, cpu_to_node(i)); - else + slot = NULL; + + if ((pool->gfp & (GFP_ATOMIC | GFP_KERNEL)) != GFP_ATOMIC) slot = __vmalloc_node(size, sizeof(void *), pool->gfp, cpu_to_node(i), __builtin_return_address(0)); - if (!slot) - return -ENOMEM; + + if (!slot) { + slot = kmalloc_node(size, pool->gfp, cpu_to_node(i)); + if (!slot) + return -ENOMEM; + } memset(slot, 0, size); pool->cpu_slots[i] = slot; diff --git a/mm/damon/core.c b/mm/damon/core.c index a83f3b736d51..511c3f61ab44 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1412,7 +1412,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c, damon_for_each_scheme(s, c) { struct damos_quota *quota = &s->quota; - if (c->passed_sample_intervals != s->next_apply_sis) + if (c->passed_sample_intervals < s->next_apply_sis) continue; if (!s->wmarks.activated) @@ -1456,17 +1456,31 @@ static unsigned long damon_feed_loop_next_input(unsigned long last_input, unsigned long score) { const unsigned long goal = 10000; - unsigned long score_goal_diff = max(goal, score) - min(goal, score); - unsigned long score_goal_diff_bp = score_goal_diff * 10000 / goal; - unsigned long compensation = last_input * score_goal_diff_bp / 10000; /* Set minimum input as 10000 to avoid compensation be zero */ const unsigned long min_input = 10000; + unsigned long score_goal_diff, compensation; + bool over_achieving = score > goal; - if (goal > score) + if (score == goal) + return last_input; + if (score >= goal * 2) + return min_input; + + if (over_achieving) + score_goal_diff = score - goal; + else + score_goal_diff = goal - score; + + if (last_input < ULONG_MAX / score_goal_diff) + compensation = last_input * score_goal_diff / goal; + else + compensation = last_input / goal * score_goal_diff; + + if (over_achieving) + return max(last_input - compensation, min_input); + if (last_input < ULONG_MAX - compensation) return last_input + compensation; - if (last_input > compensation + min_input) - return last_input - compensation; - return min_input; + return ULONG_MAX; } #ifdef CONFIG_PSI @@ -1622,7 +1636,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c) bool has_schemes_to_apply = false; damon_for_each_scheme(s, c) { - if (c->passed_sample_intervals != s->next_apply_sis) + if (c->passed_sample_intervals < s->next_apply_sis) continue; if (!s->wmarks.activated) @@ -1642,9 +1656,9 @@ static void kdamond_apply_schemes(struct damon_ctx *c) } damon_for_each_scheme(s, c) { - if (c->passed_sample_intervals != s->next_apply_sis) + if (c->passed_sample_intervals < s->next_apply_sis) continue; - s->next_apply_sis += + s->next_apply_sis = c->passed_sample_intervals + (s->apply_interval_us ? 
s->apply_interval_us : c->attrs.aggr_interval) / sample_interval; } @@ -2000,7 +2014,7 @@ static int kdamond_fn(void *data) if (ctx->ops.check_accesses) max_nr_accesses = ctx->ops.check_accesses(ctx); - if (ctx->passed_sample_intervals == next_aggregation_sis) { + if (ctx->passed_sample_intervals >= next_aggregation_sis) { kdamond_merge_regions(ctx, max_nr_accesses / 10, sz_limit); @@ -2018,7 +2032,7 @@ static int kdamond_fn(void *data) sample_interval = ctx->attrs.sample_interval ? ctx->attrs.sample_interval : 1; - if (ctx->passed_sample_intervals == next_aggregation_sis) { + if (ctx->passed_sample_intervals >= next_aggregation_sis) { ctx->next_aggregation_sis = next_aggregation_sis + ctx->attrs.aggr_interval / sample_interval; @@ -2028,7 +2042,7 @@ static int kdamond_fn(void *data) ctx->ops.reset_aggregated(ctx); } - if (ctx->passed_sample_intervals == next_ops_update_sis) { + if (ctx->passed_sample_intervals >= next_ops_update_sis) { ctx->next_ops_update_sis = next_ops_update_sis + ctx->attrs.ops_update_interval / sample_interval; diff --git a/mm/filemap.c b/mm/filemap.c index 36d22968be9a..56fa431c52af 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2625,7 +2625,7 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, if (unlikely(!iov_iter_count(iter))) return 0; - iov_iter_truncate(iter, inode->i_sb->s_maxbytes); + iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos); folio_batch_init(&fbatch); do { @@ -2273,20 +2273,57 @@ struct page *get_dump_page(unsigned long addr) #endif /* CONFIG_ELF_CORE */ #ifdef CONFIG_MIGRATION + +/* + * An array of either pages or folios ("pofs"). Although it may seem tempting to + * avoid this complication, by simply interpreting a list of folios as a list of + * pages, that approach won't work in the longer term, because eventually the + * layouts of struct page and struct folio will become completely different. + * Furthermore, this pof approach avoids excessive page_folio() calls. + */ +struct pages_or_folios { + union { + struct page **pages; + struct folio **folios; + void **entries; + }; + bool has_folios; + long nr_entries; +}; + +static struct folio *pofs_get_folio(struct pages_or_folios *pofs, long i) +{ + if (pofs->has_folios) + return pofs->folios[i]; + return page_folio(pofs->pages[i]); +} + +static void pofs_clear_entry(struct pages_or_folios *pofs, long i) +{ + pofs->entries[i] = NULL; +} + +static void pofs_unpin(struct pages_or_folios *pofs) +{ + if (pofs->has_folios) + unpin_folios(pofs->folios, pofs->nr_entries); + else + unpin_user_pages(pofs->pages, pofs->nr_entries); +} + /* * Returns the number of collected folios. Return value is always >= 0. */ static unsigned long collect_longterm_unpinnable_folios( - struct list_head *movable_folio_list, - unsigned long nr_folios, - struct folio **folios) + struct list_head *movable_folio_list, + struct pages_or_folios *pofs) { unsigned long i, collected = 0; struct folio *prev_folio = NULL; bool drain_allow = true; - for (i = 0; i < nr_folios; i++) { - struct folio *folio = folios[i]; + for (i = 0; i < pofs->nr_entries; i++) { + struct folio *folio = pofs_get_folio(pofs, i); if (folio == prev_folio) continue; @@ -2327,16 +2364,15 @@ static unsigned long collect_longterm_unpinnable_folios( * Returns -EAGAIN if all folios were successfully migrated or -errno for * failure (or partial success). 
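As a worked example of the damon_feed_loop_next_input() rewrite shown a little earlier: the new version handles over- and under-achievement explicitly, scales the compensation by |score - goal| / goal with overflow guards, and clamps wildly over-achieving scores straight to the minimum input. The snippet below is a user-space transcription for illustration only (GOAL/MIN_INPUT mirror the 10000 constants in the hunk); it is not the kernel function itself.

#include <limits.h>
#include <stdio.h>

#define GOAL      10000UL	/* target feedback score */
#define MIN_INPUT 10000UL	/* floor that keeps compensation non-zero */

static unsigned long next_input(unsigned long last_input, unsigned long score)
{
	unsigned long diff, compensation;
	int over = score > GOAL;

	if (score == GOAL)
		return last_input;
	if (score >= GOAL * 2)		/* wildly over-achieving: jump to the floor */
		return MIN_INPUT;

	diff = over ? score - GOAL : GOAL - score;

	/* Scale proportionally, avoiding unsigned overflow either way. */
	if (last_input < ULONG_MAX / diff)
		compensation = last_input * diff / GOAL;
	else
		compensation = last_input / GOAL * diff;

	if (over)
		return last_input - compensation > MIN_INPUT ?
			last_input - compensation : MIN_INPUT;
	if (last_input < ULONG_MAX - compensation)
		return last_input + compensation;
	return ULONG_MAX;
}

int main(void)
{
	/* 30% under the target -> grow the input by 30%. */
	printf("%lu\n", next_input(1000000UL, 7000UL));   /* 1300000 */
	/* 20% over the target -> shrink it by 20%. */
	printf("%lu\n", next_input(1000000UL, 12000UL));  /*  800000 */
	/* At least 2x the target -> clamp to the minimum. */
	printf("%lu\n", next_input(1000000UL, 25000UL));  /*   10000 */
	return 0;
}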
*/ -static int migrate_longterm_unpinnable_folios( - struct list_head *movable_folio_list, - unsigned long nr_folios, - struct folio **folios) +static int +migrate_longterm_unpinnable_folios(struct list_head *movable_folio_list, + struct pages_or_folios *pofs) { int ret; unsigned long i; - for (i = 0; i < nr_folios; i++) { - struct folio *folio = folios[i]; + for (i = 0; i < pofs->nr_entries; i++) { + struct folio *folio = pofs_get_folio(pofs, i); if (folio_is_device_coherent(folio)) { /* @@ -2344,7 +2380,7 @@ static int migrate_longterm_unpinnable_folios( * convert the pin on the source folio to a normal * reference. */ - folios[i] = NULL; + pofs_clear_entry(pofs, i); folio_get(folio); gup_put_folio(folio, 1, FOLL_PIN); @@ -2363,8 +2399,8 @@ static int migrate_longterm_unpinnable_folios( * calling folio_isolate_lru() which takes a reference so the * folio won't be freed if it's migrating. */ - unpin_folio(folios[i]); - folios[i] = NULL; + unpin_folio(folio); + pofs_clear_entry(pofs, i); } if (!list_empty(movable_folio_list)) { @@ -2387,12 +2423,26 @@ static int migrate_longterm_unpinnable_folios( return -EAGAIN; err: - unpin_folios(folios, nr_folios); + pofs_unpin(pofs); putback_movable_pages(movable_folio_list); return ret; } +static long +check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs) +{ + LIST_HEAD(movable_folio_list); + unsigned long collected; + + collected = collect_longterm_unpinnable_folios(&movable_folio_list, + pofs); + if (!collected) + return 0; + + return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs); +} + /* * Check whether all folios are *allowed* to be pinned indefinitely (long term). * Rather confusingly, all folios in the range are required to be pinned via @@ -2417,16 +2467,13 @@ err: static long check_and_migrate_movable_folios(unsigned long nr_folios, struct folio **folios) { - unsigned long collected; - LIST_HEAD(movable_folio_list); + struct pages_or_folios pofs = { + .folios = folios, + .has_folios = true, + .nr_entries = nr_folios, + }; - collected = collect_longterm_unpinnable_folios(&movable_folio_list, - nr_folios, folios); - if (!collected) - return 0; - - return migrate_longterm_unpinnable_folios(&movable_folio_list, - nr_folios, folios); + return check_and_migrate_movable_pages_or_folios(&pofs); } /* @@ -2436,22 +2483,13 @@ static long check_and_migrate_movable_folios(unsigned long nr_folios, static long check_and_migrate_movable_pages(unsigned long nr_pages, struct page **pages) { - struct folio **folios; - long i, ret; + struct pages_or_folios pofs = { + .pages = pages, + .has_folios = false, + .nr_entries = nr_pages, + }; - folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL); - if (!folios) { - unpin_user_pages(pages, nr_pages); - return -ENOMEM; - } - - for (i = 0; i < nr_pages; i++) - folios[i] = page_folio(pages[i]); - - ret = check_and_migrate_movable_folios(nr_pages, folios); - - kfree(folios); - return ret; + return check_and_migrate_movable_pages_or_folios(&pofs); } #else static long check_and_migrate_movable_pages(unsigned long nr_pages, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2fb328880b50..5734d5d5060f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3588,10 +3588,27 @@ int split_folio_to_list(struct folio *folio, struct list_head *list) return split_huge_page_to_list_to_order(&folio->page, list, ret); } -void __folio_undo_large_rmappable(struct folio *folio) +/* + * __folio_unqueue_deferred_split() is not to be called directly: + * the folio_unqueue_deferred_split() inline 
wrapper in mm/internal.h + * limits its calls to those folios which may have a _deferred_list for + * queueing THP splits, and that list is (racily observed to be) non-empty. + * + * It is unsafe to call folio_unqueue_deferred_split() until folio refcount is + * zero: because even when split_queue_lock is held, a non-empty _deferred_list + * might be in use on deferred_split_scan()'s unlocked on-stack list. + * + * If memory cgroups are enabled, split_queue_lock is in the mem_cgroup: it is + * therefore important to unqueue deferred split before changing folio memcg. + */ +bool __folio_unqueue_deferred_split(struct folio *folio) { struct deferred_split *ds_queue; unsigned long flags; + bool unqueued = false; + + WARN_ON_ONCE(folio_ref_count(folio)); + WARN_ON_ONCE(!mem_cgroup_disabled() && !folio_memcg(folio)); ds_queue = get_deferred_split_queue(folio); spin_lock_irqsave(&ds_queue->split_queue_lock, flags); @@ -3603,8 +3620,11 @@ void __folio_undo_large_rmappable(struct folio *folio) MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); } list_del_init(&folio->_deferred_list); + unqueued = true; } spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); + + return unqueued; /* useful for debug warnings */ } /* partially_mapped=false won't clear PG_partially_mapped folio flag */ @@ -3627,14 +3647,11 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped) return; /* - * The try_to_unmap() in page reclaim path might reach here too, - * this may cause a race condition to corrupt deferred split queue. - * And, if page reclaim is already handling the same folio, it is - * unnecessary to handle it again in shrinker. - * - * Check the swapcache flag to determine if the folio is being - * handled by page reclaim since THP swap would add the folio into - * swap cache before calling try_to_unmap(). + * Exclude swapcache: originally to avoid a corrupt deferred split + * queue. Nowadays that is fully prevented by mem_cgroup_swapout(); + * but if page reclaim is already handling the same folio, it is + * unnecessary to handle it again in the shrinker, so excluding + * swapcache here may still be a useful optimization. */ if (folio_test_swapcache(folio)) return; @@ -3718,8 +3735,8 @@ static unsigned long deferred_split_scan(struct shrinker *shrink, struct deferred_split *ds_queue = &pgdata->deferred_split_queue; unsigned long flags; LIST_HEAD(list); - struct folio *folio, *next; - int split = 0; + struct folio *folio, *next, *prev = NULL; + int split = 0, removed = 0; #ifdef CONFIG_MEMCG if (sc->memcg) @@ -3773,17 +3790,32 @@ next: * in the case it was underused, then consider it used and * don't add it back to split_queue. */ - if (!did_split && !folio_test_partially_mapped(folio)) { + if (did_split) { + ; /* folio already removed from list */ + } else if (!folio_test_partially_mapped(folio)) { list_del_init(&folio->_deferred_list); - ds_queue->split_queue_len--; + removed++; + } else { + /* + * That unlocked list_del_init() above would be unsafe, + * unless its folio is separated from any earlier folios + * left on the list (which may be concurrently unqueued) + * by one safe folio with refcount still raised. + */ + swap(folio, prev); } - folio_put(folio); + if (folio) + folio_put(folio); } spin_lock_irqsave(&ds_queue->split_queue_lock, flags); list_splice_tail(&list, &ds_queue->split_queue); + ds_queue->split_queue_len -= removed; spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); + if (prev) + folio_put(prev); + /* * Stop shrinker if we didn't split any page, but the queue is empty. 
* This can happen if pages were freed under us. diff --git a/mm/internal.h b/mm/internal.h index 93083bbeeefa..64c2eb0b160e 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -108,6 +108,51 @@ static inline void *folio_raw_mapping(const struct folio *folio) return (void *)(mapping & ~PAGE_MAPPING_FLAGS); } +/* + * This is a file-backed mapping, and is about to be memory mapped - invoke its + * mmap hook and safely handle error conditions. On error, VMA hooks will be + * mutated. + * + * @file: File which backs the mapping. + * @vma: VMA which we are mapping. + * + * Returns: 0 if success, error otherwise. + */ +static inline int mmap_file(struct file *file, struct vm_area_struct *vma) +{ + int err = call_mmap(file, vma); + + if (likely(!err)) + return 0; + + /* + * OK, we tried to call the file hook for mmap(), but an error + * arose. The mapping is in an inconsistent state and we most not invoke + * any further hooks on it. + */ + vma->vm_ops = &vma_dummy_vm_ops; + + return err; +} + +/* + * If the VMA has a close hook then close it, and since closing it might leave + * it in an inconsistent state which makes the use of any hooks suspect, clear + * them down by installing dummy empty hooks. + */ +static inline void vma_close(struct vm_area_struct *vma) +{ + if (vma->vm_ops && vma->vm_ops->close) { + vma->vm_ops->close(vma); + + /* + * The mapping is in an inconsistent state, and no further hooks + * may be invoked upon it. + */ + vma->vm_ops = &vma_dummy_vm_ops; + } +} + #ifdef CONFIG_MMU /* Flags for folio_pte_batch(). */ @@ -639,11 +684,11 @@ static inline void folio_set_order(struct folio *folio, unsigned int order) #endif } -void __folio_undo_large_rmappable(struct folio *folio); -static inline void folio_undo_large_rmappable(struct folio *folio) +bool __folio_unqueue_deferred_split(struct folio *folio); +static inline bool folio_unqueue_deferred_split(struct folio *folio) { if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio)) - return; + return false; /* * At this point, there is no one trying to add the folio to @@ -651,9 +696,9 @@ static inline void folio_undo_large_rmappable(struct folio *folio) * to check without acquiring the split_queue_lock. */ if (data_race(list_empty(&folio->_deferred_list))) - return; + return false; - __folio_undo_large_rmappable(folio); + return __folio_unqueue_deferred_split(folio); } static inline struct folio *page_rmappable_folio(struct page *page) diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index 81d8819f13cd..f8744f5630bb 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -848,6 +848,8 @@ static int mem_cgroup_move_account(struct folio *folio, css_get(&to->css); css_put(&from->css); + /* Warning should never happen, so don't worry about refcount non-0 */ + WARN_ON_ONCE(folio_unqueue_deferred_split(folio)); folio->memcg_data = (unsigned long)to; __folio_memcg_unlock(from); @@ -1217,7 +1219,9 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, enum mc_target_type target_type; union mc_target target; struct folio *folio; + bool tried_split_before = false; +retry_pmd: ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { if (mc.precharge < HPAGE_PMD_NR) { @@ -1227,6 +1231,27 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, target_type = get_mctgt_type_thp(vma, addr, *pmd, &target); if (target_type == MC_TARGET_PAGE) { folio = target.folio; + /* + * Deferred split queue locking depends on memcg, + * and unqueue is unsafe unless folio refcount is 0: + * split or skip if on the queue? first try to split. 
+ */ + if (!list_empty(&folio->_deferred_list)) { + spin_unlock(ptl); + if (!tried_split_before) + split_folio(folio); + folio_unlock(folio); + folio_put(folio); + if (tried_split_before) + return 0; + tried_split_before = true; + goto retry_pmd; + } + /* + * So long as that pmd lock is held, the folio cannot + * be racily added to the _deferred_list, because + * __folio_remove_rmap() will find !partially_mapped. + */ if (folio_isolate_lru(folio)) { if (!mem_cgroup_move_account(folio, true, mc.from, mc.to)) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7845c64a2c57..53db98d2c4a1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -431,6 +431,10 @@ static const unsigned int memcg_vm_event_stat[] = { PGDEACTIVATE, PGLAZYFREE, PGLAZYFREED, +#ifdef CONFIG_SWAP + SWPIN_ZERO, + SWPOUT_ZERO, +#endif #ifdef CONFIG_ZSWAP ZSWPIN, ZSWPOUT, @@ -4629,10 +4633,6 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) struct obj_cgroup *objcg; VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); - VM_BUG_ON_FOLIO(folio_order(folio) > 1 && - !folio_test_hugetlb(folio) && - !list_empty(&folio->_deferred_list) && - folio_test_partially_mapped(folio), folio); /* * Nobody should be changing or seriously looking at @@ -4679,6 +4679,7 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug) ug->nr_memory += nr_pages; ug->pgpgout++; + WARN_ON_ONCE(folio_unqueue_deferred_split(folio)); folio->memcg_data = 0; } @@ -4790,6 +4791,9 @@ void mem_cgroup_migrate(struct folio *old, struct folio *new) /* Transfer the charge and the css ref */ commit_charge(new, memcg); + + /* Warning should never happen, so don't worry about refcount non-0 */ + WARN_ON_ONCE(folio_unqueue_deferred_split(old)); old->memcg_data = 0; } @@ -4976,6 +4980,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) VM_BUG_ON_FOLIO(oldid, folio); mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); + folio_unqueue_deferred_split(folio); folio->memcg_data = 0; if (!mem_cgroup_is_root(memcg)) diff --git a/mm/migrate.c b/mm/migrate.c index fab84a776088..dfa24e41e8f9 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -490,7 +490,7 @@ static int __folio_migrate_mapping(struct address_space *mapping, folio_test_large_rmappable(folio)) { if (!folio_ref_freeze(folio, expected_count)) return -EAGAIN; - folio_undo_large_rmappable(folio); + folio_unqueue_deferred_split(folio); folio_ref_unfreeze(folio, expected_count); } @@ -515,7 +515,7 @@ static int __folio_migrate_mapping(struct address_space *mapping, } /* Take off deferred split queue while frozen and memcg set */ - folio_undo_large_rmappable(folio); + folio_unqueue_deferred_split(folio); /* * Now we know that no one else is looking at the folio: diff --git a/mm/mlock.c b/mm/mlock.c index e3e3dc2b2956..cde076fa7d5e 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -725,14 +725,17 @@ static int apply_mlockall_flags(int flags) } for_each_vma(vmi, vma) { + int error; vm_flags_t newflags; newflags = vma->vm_flags & ~VM_LOCKED_MASK; newflags |= to_add; - /* Ignore errors */ - mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end, - newflags); + error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end, + newflags); + /* Ignore errors, but prev needs fixing up. */ + if (error) + prev = vma; cond_resched(); } out: diff --git a/mm/mmap.c b/mm/mmap.c index 9841b41e3c76..79d541f1502b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -344,7 +344,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, * to. 
we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ - vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | + vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; /* Obtain the address to map to. we verify (or select) it and ensure @@ -1358,20 +1358,18 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, return do_vmi_munmap(&vmi, mm, start, len, uf, false); } -unsigned long mmap_region(struct file *file, unsigned long addr, +static unsigned long __mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; pgoff_t pglen = PHYS_PFN(len); - struct vm_area_struct *merge; unsigned long charged = 0; struct vma_munmap_struct vms; struct ma_state mas_detach; struct maple_tree mt_detach; unsigned long end = addr + len; - bool writable_file_mapping = false; int error; VMA_ITERATOR(vmi, mm, addr); VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff); @@ -1422,7 +1420,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, /* * clear PTEs while the vma is still in the tree so that rmap * cannot race with the freeing later in the truncate scenario. - * This is also needed for call_mmap(), which is why vm_ops + * This is also needed for mmap_file(), which is why vm_ops * close function is called. */ vms_clean_up_area(&vms, &mas_detach); @@ -1445,35 +1443,35 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vm_flags_init(vma, vm_flags); vma->vm_page_prot = vm_get_page_prot(vm_flags); + if (vma_iter_prealloc(&vmi, vma)) { + error = -ENOMEM; + goto free_vma; + } + if (file) { vma->vm_file = get_file(file); - error = call_mmap(file, vma); + error = mmap_file(file, vma); if (error) - goto unmap_and_free_vma; - - if (vma_is_shared_maywrite(vma)) { - error = mapping_map_writable(file->f_mapping); - if (error) - goto close_and_free_vma; - - writable_file_mapping = true; - } + goto unmap_and_free_file_vma; + /* Drivers cannot alter the address of the VMA. */ + WARN_ON_ONCE(addr != vma->vm_start); /* - * Expansion is handled above, merging is handled below. - * Drivers should not alter the address of the VMA. + * Drivers should not permit writability when previously it was + * disallowed. */ - if (WARN_ON((addr != vma->vm_start))) { - error = -EINVAL; - goto close_and_free_vma; - } + VM_WARN_ON_ONCE(vm_flags != vma->vm_flags && + !(vm_flags & VM_MAYWRITE) && + (vma->vm_flags & VM_MAYWRITE)); vma_iter_config(&vmi, addr, end); /* - * If vm_flags changed after call_mmap(), we should try merge + * If vm_flags changed after mmap_file(), we should try merge * vma again as we may succeed this time. */ if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) { + struct vm_area_struct *merge; + vmg.flags = vma->vm_flags; /* If this fails, state is reset ready for a reattempt. */ merge = vma_merge_new_range(&vmg); @@ -1491,7 +1489,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, vma = merge; /* Update vm_flags to pick up the change. 
*/ vm_flags = vma->vm_flags; - goto unmap_writable; + goto file_expanded; } vma_iter_config(&vmi, addr, end); } @@ -1500,26 +1498,15 @@ unsigned long mmap_region(struct file *file, unsigned long addr, } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) - goto free_vma; + goto free_iter_vma; } else { vma_set_anonymous(vma); } - if (map_deny_write_exec(vma, vma->vm_flags)) { - error = -EACCES; - goto close_and_free_vma; - } - - /* Allow architectures to sanity-check the vm_flags */ - if (!arch_validate_flags(vma->vm_flags)) { - error = -EINVAL; - goto close_and_free_vma; - } - - if (vma_iter_prealloc(&vmi, vma)) { - error = -ENOMEM; - goto close_and_free_vma; - } +#ifdef CONFIG_SPARC64 + /* TODO: Fix SPARC ADI! */ + WARN_ON_ONCE(!arch_validate_flags(vm_flags)); +#endif /* Lock the VMA since it is modified after insertion into VMA tree */ vma_start_write(vma); @@ -1533,10 +1520,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, */ khugepaged_enter_vma(vma, vma->vm_flags); - /* Once vma denies write, undo our temporary denial count */ -unmap_writable: - if (writable_file_mapping) - mapping_unmap_writable(file->f_mapping); +file_expanded: file = vma->vm_file; ksm_add_vma(vma); expanded: @@ -1569,24 +1553,17 @@ expanded: vma_set_page_prot(vma); - validate_mm(mm); return addr; -close_and_free_vma: - if (file && !vms.closed_vm_ops && vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - - if (file || vma->vm_file) { -unmap_and_free_vma: - fput(vma->vm_file); - vma->vm_file = NULL; +unmap_and_free_file_vma: + fput(vma->vm_file); + vma->vm_file = NULL; - vma_iter_set(&vmi, vma->vm_end); - /* Undo any partial mapping done by a device driver. */ - unmap_region(&vmi.mas, vma, vmg.prev, vmg.next); - } - if (writable_file_mapping) - mapping_unmap_writable(file->f_mapping); + vma_iter_set(&vmi, vma->vm_end); + /* Undo any partial mapping done by a device driver. */ + unmap_region(&vmi.mas, vma, vmg.prev, vmg.next); +free_iter_vma: + vma_iter_free(&vmi); free_vma: vm_area_free(vma); unacct_error: @@ -1596,10 +1573,43 @@ unacct_error: abort_munmap: vms_abort_munmap_vmas(&vms, &mas_detach); gather_failed: - validate_mm(mm); return error; } +unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, + struct list_head *uf) +{ + unsigned long ret; + bool writable_file_mapping = false; + + /* Check to see if MDWE is applicable. */ + if (map_deny_write_exec(vm_flags, vm_flags)) + return -EACCES; + + /* Allow architectures to sanity-check the vm_flags. */ + if (!arch_validate_flags(vm_flags)) + return -EINVAL; + + /* Map writable and ensure this isn't a sealed memfd. */ + if (file && is_shared_maywrite(vm_flags)) { + int error = mapping_map_writable(file->f_mapping); + + if (error) + return error; + writable_file_mapping = true; + } + + ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf); + + /* Clear our write mapping regardless of error. 
*/ + if (writable_file_mapping) + mapping_unmap_writable(file->f_mapping); + + validate_mm(current->mm); + return ret; +} + static int __vm_munmap(unsigned long start, size_t len, bool unlock) { int ret; @@ -1934,7 +1944,7 @@ void exit_mmap(struct mm_struct *mm) do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); - remove_vma(vma, /* unreachable = */ true, /* closed = */ false); + remove_vma(vma, /* unreachable = */ true); count++; cond_resched(); vma = vma_next(&vmi); diff --git a/mm/mprotect.c b/mm/mprotect.c index 0c5d6d06107d..6f450af3252e 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -810,7 +810,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len, break; } - if (map_deny_write_exec(vma, newflags)) { + if (map_deny_write_exec(vma->vm_flags, newflags)) { error = -EACCES; break; } diff --git a/mm/nommu.c b/mm/nommu.c index 385b0c15add8..9cb6e99215e2 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -573,7 +573,7 @@ static int delete_vma_from_mm(struct vm_area_struct *vma) VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start); vma_iter_config(&vmi, vma->vm_start, vma->vm_end); - if (vma_iter_prealloc(&vmi, vma)) { + if (vma_iter_prealloc(&vmi, NULL)) { pr_warn("Allocation of vma tree for process %d failed\n", current->pid); return -ENOMEM; @@ -589,8 +589,7 @@ static int delete_vma_from_mm(struct vm_area_struct *vma) */ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) { - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); + vma_close(vma); if (vma->vm_file) fput(vma->vm_file); put_nommu_region(vma->vm_region); @@ -843,7 +842,7 @@ static unsigned long determine_vm_flags(struct file *file, { unsigned long vm_flags; - vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags); + vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags); if (!file) { /* @@ -885,7 +884,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma) { int ret; - ret = call_mmap(vma->vm_file, vma); + ret = mmap_file(vma->vm_file, vma); if (ret == 0) { vma->vm_region->vm_top = vma->vm_region->vm_end; return 0; @@ -918,7 +917,7 @@ static int do_mmap_private(struct vm_area_struct *vma, * happy. 
*/ if (capabilities & NOMMU_MAP_DIRECT) { - ret = call_mmap(vma->vm_file, vma); + ret = mmap_file(vma->vm_file, vma); /* shouldn't return success if we're not sharing */ if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags))) ret = -ENOSYS; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 34b3eb74630a..45e12658438e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -635,6 +635,8 @@ compaction_capture(struct capture_control *capc, struct page *page, static inline void account_freepages(struct zone *zone, int nr_pages, int migratetype) { + lockdep_assert_held(&zone->lock); + if (is_migrate_isolate(migratetype)) return; @@ -642,6 +644,9 @@ static inline void account_freepages(struct zone *zone, int nr_pages, if (is_migrate_cma(migratetype)) __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); + else if (is_migrate_highatomic(migratetype)) + WRITE_ONCE(zone->nr_free_highatomic, + zone->nr_free_highatomic + nr_pages); } /* Used for pages not on another list */ @@ -961,9 +966,8 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) break; case 2: /* the second tail page: deferred_list overlaps ->mapping */ - if (unlikely(!list_empty(&folio->_deferred_list) && - folio_test_partially_mapped(folio))) { - bad_page(page, "partially mapped folio on deferred list"); + if (unlikely(!list_empty(&folio->_deferred_list))) { + bad_page(page, "on deferred list"); goto out; } break; @@ -1044,6 +1048,7 @@ __always_inline bool free_pages_prepare(struct page *page, bool skip_kasan_poison = should_skip_kasan_poison(page); bool init = want_init_on_free(); bool compound = PageCompound(page); + struct folio *folio = page_folio(page); VM_BUG_ON_PAGE(PageTail(page), page); @@ -1053,6 +1058,20 @@ __always_inline bool free_pages_prepare(struct page *page, if (memcg_kmem_online() && PageMemcgKmem(page)) __memcg_kmem_uncharge_page(page, order); + /* + * In rare cases, when truncation or holepunching raced with + * munlock after VM_LOCKED was cleared, Mlocked may still be + * found set here. This does not indicate a problem, unless + * "unevictable_pgs_cleared" appears worryingly large. + */ + if (unlikely(folio_test_mlocked(folio))) { + long nr_pages = folio_nr_pages(folio); + + __folio_clear_mlocked(folio); + zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); + count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); + } + if (unlikely(PageHWPoison(page)) && !order) { /* Do not let hwpoison pages hit pcplists/buddy */ reset_page_owner(page, order); @@ -2682,7 +2701,6 @@ void free_unref_folios(struct folio_batch *folios) unsigned long pfn = folio_pfn(folio); unsigned int order = folio_order(folio); - folio_undo_large_rmappable(folio); if (!free_pages_prepare(&folio->page, order)) continue; /* @@ -3081,11 +3099,10 @@ static inline long __zone_watermark_unusable_free(struct zone *z, /* * If the caller does not have rights to reserves below the min - * watermark then subtract the high-atomic reserves. This will - * over-estimate the size of the atomic reserve but it avoids a search. + * watermark then subtract the free pages reserved for highatomic. 
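On the __zone_watermark_unusable_free() change just above: nr_reserved_highatomic is the size of the highatomic reservation, while the new zone->nr_free_highatomic counts only the pages of that reservation that are actually free, so subtracting the former over-states how much free memory is off limits once highatomic pages are in use (the old comment itself called it an over-estimate). A small numeric illustration with made-up values, not kernel code.

#include <stdio.h>

int main(void)
{
	unsigned long free_pages = 2000;	 /* NR_FREE_PAGES for the zone  */
	unsigned long reserved_highatomic = 512; /* reservation size (capacity) */
	unsigned long free_highatomic = 100;	 /* actually free inside it     */

	/* Old estimate: the whole reservation is treated as unusable. */
	unsigned long usable_old = free_pages - reserved_highatomic;
	/* New estimate: only the still-free highatomic pages are off limits. */
	unsigned long usable_new = free_pages - free_highatomic;

	printf("usable (old, over-estimated reserve): %lu\n", usable_old); /* 1488 */
	printf("usable (new, tracked free reserve):   %lu\n", usable_new); /* 1900 */
	return 0;
}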
*/ if (likely(!(alloc_flags & ALLOC_RESERVES))) - unusable_free += z->nr_reserved_highatomic; + unusable_free += READ_ONCE(z->nr_free_highatomic); #ifdef CONFIG_CMA /* If allocation can't use CMA areas don't use free CMA pages */ diff --git a/mm/page_io.c b/mm/page_io.c index 69536a2b3c13..01749b99fb54 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -204,7 +204,9 @@ static bool is_folio_zero_filled(struct folio *folio) static void swap_zeromap_folio_set(struct folio *folio) { + struct obj_cgroup *objcg = get_obj_cgroup_from_folio(folio); struct swap_info_struct *sis = swp_swap_info(folio->swap); + int nr_pages = folio_nr_pages(folio); swp_entry_t entry; unsigned int i; @@ -212,6 +214,12 @@ static void swap_zeromap_folio_set(struct folio *folio) entry = page_swap_entry(folio_page(folio, i)); set_bit(swp_offset(entry), sis->zeromap); } + + count_vm_events(SWPOUT_ZERO, nr_pages); + if (objcg) { + count_objcg_events(objcg, SWPOUT_ZERO, nr_pages); + obj_cgroup_put(objcg); + } } static void swap_zeromap_folio_clear(struct folio *folio) @@ -503,6 +511,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret) static bool swap_read_folio_zeromap(struct folio *folio) { int nr_pages = folio_nr_pages(folio); + struct obj_cgroup *objcg; bool is_zeromap; /* @@ -517,6 +526,13 @@ static bool swap_read_folio_zeromap(struct folio *folio) if (!is_zeromap) return false; + objcg = get_obj_cgroup_from_folio(folio); + count_vm_events(SWPIN_ZERO, nr_pages); + if (objcg) { + count_objcg_events(objcg, SWPIN_ZERO, nr_pages); + obj_cgroup_put(objcg); + } + folio_zero_range(folio, 0, folio_size(folio)); folio_mark_uptodate(folio); return true; diff --git a/mm/shmem.c b/mm/shmem.c index 4ba1d00fabda..e87f5d6799a7 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2733,9 +2733,6 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) if (ret) return ret; - /* arm64 - allow memory tagging on RAM-based files */ - vm_flags_set(vma, VM_MTE_ALLOWED); - file_accessed(file); /* This is anonymous shared memory if it is unlinked at the time of mmap */ if (inode->i_nlink) diff --git a/mm/slab_common.c b/mm/slab_common.c index 552b92dfdac7..893d32059915 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -380,8 +380,11 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags, unsigned int usersize, void (*ctor)(void *)) { + unsigned long mask = 0; + unsigned int idx; kmem_buckets *b; - int idx; + + BUILD_BUG_ON(ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]) > BITS_PER_LONG); /* * When the separate buckets API is not built in, just return @@ -403,7 +406,7 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags, for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) { char *short_size, *cache_name; unsigned int cache_useroffset, cache_usersize; - unsigned int size; + unsigned int size, aligned_idx; if (!kmalloc_caches[KMALLOC_NORMAL][idx]) continue; @@ -416,10 +419,6 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags, if (WARN_ON(!short_size)) goto fail; - cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1); - if (WARN_ON(!cache_name)) - goto fail; - if (useroffset >= size) { cache_useroffset = 0; cache_usersize = 0; @@ -427,18 +426,28 @@ kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags, cache_useroffset = useroffset; cache_usersize = min(size - cache_useroffset, usersize); } - (*b)[idx] = kmem_cache_create_usercopy(cache_name, size, + + aligned_idx = __kmalloc_index(size, false); + if (!(*b)[aligned_idx]) { 
+ cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1); + if (WARN_ON(!cache_name)) + goto fail; + (*b)[aligned_idx] = kmem_cache_create_usercopy(cache_name, size, 0, flags, cache_useroffset, cache_usersize, ctor); - kfree(cache_name); - if (WARN_ON(!(*b)[idx])) - goto fail; + kfree(cache_name); + if (WARN_ON(!(*b)[aligned_idx])) + goto fail; + set_bit(aligned_idx, &mask); + } + if (idx != aligned_idx) + (*b)[idx] = (*b)[aligned_idx]; } return b; fail: - for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) + for_each_set_bit(idx, &mask, ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL])) kmem_cache_destroy((*b)[idx]); kmem_cache_free(kmem_buckets_cache, b); diff --git a/mm/swap.c b/mm/swap.c index 835bdf324b76..59f30a981c6f 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -78,20 +78,6 @@ static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp, lruvec_del_folio(*lruvecp, folio); __folio_clear_lru_flags(folio); } - - /* - * In rare cases, when truncation or holepunching raced with - * munlock after VM_LOCKED was cleared, Mlocked may still be - * found set here. This does not indicate a problem, unless - * "unevictable_pgs_cleared" appears worryingly large. - */ - if (unlikely(folio_test_mlocked(folio))) { - long nr_pages = folio_nr_pages(folio); - - __folio_clear_mlocked(folio); - zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); - count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); - } } /* @@ -121,7 +107,7 @@ void __folio_put(struct folio *folio) } page_cache_release(folio); - folio_undo_large_rmappable(folio); + folio_unqueue_deferred_split(folio); mem_cgroup_uncharge(folio); free_unref_page(&folio->page, folio_order(folio)); } @@ -988,7 +974,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs) free_huge_folio(folio); continue; } - folio_undo_large_rmappable(folio); + folio_unqueue_deferred_split(folio); __page_cache_release(folio, &lruvec, &flags); if (j != i) diff --git a/mm/swapfile.c b/mm/swapfile.c index 46bd4b1a3c07..9c85bd46ab7f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -929,7 +929,7 @@ static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, si->highest_bit = 0; del_from_avail_list(si); - if (vm_swap_full()) + if (si->cluster_info && vm_swap_full()) schedule_work(&si->reclaim_work); } } @@ -323,11 +323,10 @@ static bool can_vma_merge_right(struct vma_merge_struct *vmg, /* * Close a vm structure and free it. 
*/ -void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed) +void remove_vma(struct vm_area_struct *vma, bool unreachable) { might_sleep(); - if (!closed && vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); + vma_close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); @@ -1115,9 +1114,7 @@ void vms_clean_up_area(struct vma_munmap_struct *vms, vms_clear_ptes(vms, mas_detach, true); mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - vms->closed_vm_ops = true; + vma_close(vma); } /* @@ -1160,7 +1157,7 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms, /* Remove and clean up vmas */ mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) - remove_vma(vma, /* = */ false, vms->closed_vm_ops); + remove_vma(vma, /* unreachable = */ false); vm_unacct_memory(vms->nr_accounted); validate_mm(mm); @@ -1684,8 +1681,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, return new_vma; out_vma_link: - if (new_vma->vm_ops && new_vma->vm_ops->close) - new_vma->vm_ops->close(new_vma); + vma_close(new_vma); if (new_vma->vm_file) fput(new_vma->vm_file); @@ -42,8 +42,7 @@ struct vma_munmap_struct { int vma_count; /* Number of vmas that will be removed */ bool unlock; /* Unlock after the munmap */ bool clear_ptes; /* If there are outstanding PTE to be cleared */ - bool closed_vm_ops; /* call_mmap() was encountered, so vmas may be closed */ - /* 1 byte hole */ + /* 2 byte hole */ unsigned long nr_pages; /* Number of pages being removed */ unsigned long locked_vm; /* Number of locked pages */ unsigned long nr_accounted; /* Number of VM_ACCOUNT pages */ @@ -198,7 +197,6 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms, vms->unmap_start = FIRST_USER_ADDRESS; vms->unmap_end = USER_PGTABLES_CEILING; vms->clear_ptes = false; - vms->closed_vm_ops = false; } #endif @@ -269,7 +267,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool unlock); -void remove_vma(struct vm_area_struct *vma, bool unreachable, bool closed); +void remove_vma(struct vm_area_struct *vma, bool unreachable); void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, struct vm_area_struct *prev, struct vm_area_struct *next); diff --git a/mm/vmscan.c b/mm/vmscan.c index ddaaff67642e..28ba2b06fc7d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1476,7 +1476,7 @@ free_it: */ nr_reclaimed += nr_pages; - folio_undo_large_rmappable(folio); + folio_unqueue_deferred_split(folio); if (folio_batch_add(&free_folios, folio) == 0) { mem_cgroup_uncharge_folios(&free_folios); try_to_unmap_flush(); @@ -1864,7 +1864,7 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec, if (unlikely(folio_put_testzero(folio))) { __folio_clear_lru_flags(folio); - folio_undo_large_rmappable(folio); + folio_unqueue_deferred_split(folio); if (folio_batch_add(&free_folios, folio) == 0) { spin_unlock_irq(&lruvec->lru_lock); mem_cgroup_uncharge_folios(&free_folios); diff --git a/mm/vmstat.c b/mm/vmstat.c index b5a4cea423e1..ac6a5aa34eab 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1415,6 +1415,8 @@ const char * const vmstat_text[] = { #ifdef CONFIG_SWAP "swap_ra", "swap_ra_hit", + "swpin_zero", + "swpout_zero", #ifdef CONFIG_KSM "ksm_swpin_copy", #endif diff --git a/mm/zswap.c b/mm/zswap.c index 162013952074..0030ce8fecfc 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -1053,7 +1053,7 @@ static int 
zswap_writeback_entry(struct zswap_entry *entry, count_vm_event(ZSWPWB); if (entry->objcg) - count_objcg_event(entry->objcg, ZSWPWB); + count_objcg_events(entry->objcg, ZSWPWB, 1); zswap_entry_free(entry); @@ -1483,7 +1483,7 @@ bool zswap_store(struct folio *folio) if (objcg) { obj_cgroup_charge_zswap(objcg, entry->length); - count_objcg_event(objcg, ZSWPOUT); + count_objcg_events(objcg, ZSWPOUT, 1); } /* @@ -1577,7 +1577,7 @@ bool zswap_load(struct folio *folio) count_vm_event(ZSWPIN); if (entry->objcg) - count_objcg_event(entry->objcg, ZSWPIN); + count_objcg_events(entry->objcg, ZSWPIN, 1); if (swapcache) { zswap_entry_free(entry); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 96d097b21d13..0ac354db8177 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3788,8 +3788,6 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); - if (conn && hci_dev_test_flag(hdev, HCI_MGMT)) - mgmt_device_connected(hdev, conn, NULL, 0); hci_dev_unlock(hdev); if (conn) { diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 7f2f40cef5fe..451e45b9a6a5 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -373,8 +373,8 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); struct net_device *dev = skb->dev, *br_indev; const struct iphdr *iph = ip_hdr(skb); + enum skb_drop_reason reason; struct rtable *rt; - int err; br_indev = nf_bridge_get_physindev(skb, net); if (!br_indev) { @@ -390,9 +390,9 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ } nf_bridge->in_prerouting = 0; if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) { - err = ip_route_input(skb, iph->daddr, iph->saddr, - ip4h_dscp(iph), dev); - if (err) { + reason = ip_route_input(skb, iph->daddr, iph->saddr, + ip4h_dscp(iph), dev); + if (reason) { struct in_device *in_dev = __in_dev_get_rcu(dev); /* If err equals -EHOSTUNREACH the error is due to a @@ -402,7 +402,8 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ * martian destinations: loopback destinations and destination * 0.0.0.0. In both cases the packet will be dropped because the * destination is the loopback device and not the bridge. 
*/ - if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) + if (reason != SKB_DROP_REASON_IP_INADDRERRORS || !in_dev || + IN_DEV_FORWARD(in_dev)) goto free_skb; rt = ip_route_output(net, iph->daddr, 0, diff --git a/net/core/Makefile b/net/core/Makefile index 5a72a87ee0f1..d9326600e289 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -46,3 +46,4 @@ obj-$(CONFIG_OF) += of_net.o obj-$(CONFIG_NET_TEST) += net_test.o obj-$(CONFIG_NET_DEVMEM) += devmem.o obj-$(CONFIG_DEBUG_NET_SMALL_RTNL) += rtnl_net_debug.o +obj-$(CONFIG_FAIL_SKB_REALLOC) += skb_fault_injection.o diff --git a/net/core/filter.c b/net/core/filter.c index 82f92ed0dc72..6625b3f563a4 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2232,7 +2232,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb, rcu_read_unlock(); return ret; } - rcu_read_unlock_bh(); + rcu_read_unlock(); if (dst) IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); out_drop: @@ -2355,7 +2355,7 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, struct flowi4 fl4 = { .flowi4_flags = FLOWI_FLAG_ANYSRC, .flowi4_mark = skb->mark, - .flowi4_tos = ip4h->tos & INET_DSCP_MASK, + .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)), .flowi4_oif = dev->ifindex, .flowi4_proto = ip4h->protocol, .daddr = ip4h->daddr, @@ -2604,18 +2604,16 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) static void sk_msg_reset_curr(struct sk_msg *msg) { - u32 i = msg->sg.start; - u32 len = 0; - - do { - len += sk_msg_elem(msg, i)->length; - sk_msg_iter_var_next(i); - if (len >= msg->sg.size) - break; - } while (i != msg->sg.end); + if (!msg->sg.size) { + msg->sg.curr = msg->sg.start; + msg->sg.copybreak = 0; + } else { + u32 i = msg->sg.end; - msg->sg.curr = i; - msg->sg.copybreak = 0; + sk_msg_iter_var_prev(i); + msg->sg.curr = i; + msg->sg.copybreak = msg->sg.data[i].length; + } } static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { @@ -2778,7 +2776,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, sk_msg_iter_var_next(i); } while (i != msg->sg.end); - if (start >= offset + l) + if (start > offset + l) return -EINVAL; space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); @@ -2803,6 +2801,8 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, raw = page_address(page); + if (i == msg->sg.end) + sk_msg_iter_var_prev(i); psge = sk_msg_elem(msg, i); front = start - offset; back = psge->length - front; @@ -2819,7 +2819,13 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, } put_page(sg_page(psge)); - } else if (start - offset) { + new = i; + goto place_new; + } + + if (start - offset) { + if (i == msg->sg.end) + sk_msg_iter_var_prev(i); psge = sk_msg_elem(msg, i); rsge = sk_msg_elem_cpy(msg, i); @@ -2830,39 +2836,44 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, sk_msg_iter_var_next(i); sg_unmark_end(psge); sg_unmark_end(&rsge); - sk_msg_iter_next(msg, end); } /* Slot(s) to place newly allocated data */ + sk_msg_iter_next(msg, end); new = i; + sk_msg_iter_var_next(i); + + if (i == msg->sg.end) { + if (!rsge.length) + goto place_new; + sk_msg_iter_next(msg, end); + goto place_new; + } /* Shift one or two slots as needed */ - if (!copy) { - sge = sk_msg_elem_cpy(msg, i); + sge = sk_msg_elem_cpy(msg, new); + sg_unmark_end(&sge); + nsge = sk_msg_elem_cpy(msg, i); + if (rsge.length) { sk_msg_iter_var_next(i); - sg_unmark_end(&sge); + nnsge = sk_msg_elem_cpy(msg, i); sk_msg_iter_next(msg, end); + } - nsge = 
sk_msg_elem_cpy(msg, i); + while (i != msg->sg.end) { + msg->sg.data[i] = sge; + sge = nsge; + sk_msg_iter_var_next(i); if (rsge.length) { - sk_msg_iter_var_next(i); + nsge = nnsge; nnsge = sk_msg_elem_cpy(msg, i); - } - - while (i != msg->sg.end) { - msg->sg.data[i] = sge; - sge = nsge; - sk_msg_iter_var_next(i); - if (rsge.length) { - nsge = nnsge; - nnsge = sk_msg_elem_cpy(msg, i); - } else { - nsge = sk_msg_elem_cpy(msg, i); - } + } else { + nsge = sk_msg_elem_cpy(msg, i); } } +place_new: /* Place newly allocated data buffer */ sk_mem_charge(msg->sk, len); msg->sg.size += len; @@ -2891,8 +2902,10 @@ static const struct bpf_func_proto bpf_msg_push_data_proto = { static void sk_msg_shift_left(struct sk_msg *msg, int i) { + struct scatterlist *sge = sk_msg_elem(msg, i); int prev; + put_page(sg_page(sge)); do { prev = i; sk_msg_iter_var_next(i); @@ -2929,6 +2942,9 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, if (unlikely(flags)) return -EINVAL; + if (unlikely(len == 0)) + return 0; + /* First find the starting scatterlist element */ i = msg->sg.start; do { @@ -2941,7 +2957,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, } while (i != msg->sg.end); /* Bounds checks: start and pop must be inside message */ - if (start >= offset + l || last >= msg->sg.size) + if (start >= offset + l || last > msg->sg.size) return -EINVAL; space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); @@ -2970,12 +2986,12 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, */ if (start != offset) { struct scatterlist *nsge, *sge = sk_msg_elem(msg, i); - int a = start; + int a = start - offset; int b = sge->length - pop - a; sk_msg_iter_var_next(i); - if (pop < sge->length - a) { + if (b > 0) { if (space) { sge->length = a; sk_msg_shift_right(msg, i); @@ -2994,7 +3010,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, if (unlikely(!page)) return -ENOMEM; - sge->length = a; orig = sg_page(sge); from = sg_virt(sge); to = page_address(page); @@ -3004,7 +3019,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, put_page(orig); } pop = 0; - } else if (pop >= sge->length - a) { + } else { pop -= (sge->length - a); sge->length = a; } @@ -3038,7 +3053,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, pop -= sge->length; sk_msg_shift_left(msg, i); } - sk_msg_iter_var_next(i); } sk_mem_uncharge(msg->sk, len - pop); diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index e0ca24a58810..ae74634310a3 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -88,6 +88,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, static int bpf_lwt_input_reroute(struct sk_buff *skb) { + enum skb_drop_reason reason; int err = -EINVAL; if (skb->protocol == htons(ETH_P_IP)) { @@ -96,8 +97,9 @@ static int bpf_lwt_input_reroute(struct sk_buff *skb) dev_hold(dev); skb_dst_drop(skb); - err = ip_route_input_noref(skb, iph->daddr, iph->saddr, - ip4h_dscp(iph), dev); + reason = ip_route_input_noref(skb, iph->daddr, iph->saddr, + ip4h_dscp(iph), dev); + err = reason ? 
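For context on the sk_msg hunks above, a minimal userspace sketch (my own simplification, not kernel code) of why the bpf_msg_pop_data() bounds check changes from "last >= size" to "last > size": a pop that ends exactly at the message size is legal, and only ranges running past the end should be rejected.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the corrected check: start must lie inside the
 * message, and the popped range may end exactly at its size.
 */
static bool pop_range_ok(unsigned int start, unsigned int len,
                         unsigned int msg_size)
{
        unsigned int last = start + len;

        return start < msg_size && last <= msg_size;
}

int main(void)
{
        assert(pop_range_ok(5, 5, 10));  /* popping the final 5 bytes of 10 is valid */
        assert(!pop_range_ok(5, 6, 10)); /* running past the end is still rejected */
        printf("pop bounds behave as expected\n");
        return 0;
}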
-EINVAL : 0; dev_put(dev); } else if (skb->protocol == htons(ETH_P_IPV6)) { skb_dst_drop(skb); @@ -207,7 +209,7 @@ static int bpf_lwt_xmit_reroute(struct sk_buff *skb) fl4.flowi4_oif = oif; fl4.flowi4_mark = skb->mark; fl4.flowi4_uid = sock_net_uid(net, sk); - fl4.flowi4_tos = iph->tos & INET_DSCP_MASK; + fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)); fl4.flowi4_flags = FLOWI_FLAG_ANYSRC; fl4.flowi4_proto = iph->protocol; fl4.daddr = iph->daddr; diff --git a/net/core/page_pool.c b/net/core/page_pool.c index a813d30d2135..f89cf93f6eb4 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -950,6 +950,7 @@ netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool, if (netmem && *offset + size > max_size) { netmem = page_pool_drain_frag(pool, netmem); if (netmem) { + recycle_stat_inc(pool, cached); alloc_stat_inc(pool, fast); goto frag_reset; } @@ -974,7 +975,6 @@ frag_reset: pool->frag_users++; pool->frag_offset = *offset + size; - alloc_stat_inc(pool, fast); return netmem; } EXPORT_SYMBOL(page_pool_alloc_frag_netmem); diff --git a/net/core/skb_fault_injection.c b/net/core/skb_fault_injection.c new file mode 100644 index 000000000000..4235db6bdfad --- /dev/null +++ b/net/core/skb_fault_injection.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/debugfs.h> +#include <linux/fault-inject.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> + +static struct { + struct fault_attr attr; + char devname[IFNAMSIZ]; + bool filtered; +} skb_realloc = { + .attr = FAULT_ATTR_INITIALIZER, + .filtered = false, +}; + +static bool should_fail_net_realloc_skb(struct sk_buff *skb) +{ + struct net_device *net = skb->dev; + + if (skb_realloc.filtered && + strncmp(net->name, skb_realloc.devname, IFNAMSIZ)) + /* device name filter set, but names do not match */ + return false; + + if (!should_fail(&skb_realloc.attr, 1)) + return false; + + return true; +} +ALLOW_ERROR_INJECTION(should_fail_net_realloc_skb, TRUE); + +void skb_might_realloc(struct sk_buff *skb) +{ + if (!should_fail_net_realloc_skb(skb)) + return; + + pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} +EXPORT_SYMBOL(skb_might_realloc); + +static int __init fail_skb_realloc_setup(char *str) +{ + return setup_fault_attr(&skb_realloc.attr, str); +} +__setup("fail_skb_realloc=", fail_skb_realloc_setup); + +static void reset_settings(void) +{ + skb_realloc.filtered = false; + memset(&skb_realloc.devname, 0, IFNAMSIZ); +} + +static ssize_t devname_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + ssize_t ret; + + reset_settings(); + ret = simple_write_to_buffer(&skb_realloc.devname, IFNAMSIZ, + ppos, buffer, count); + if (ret < 0) + return ret; + + skb_realloc.devname[IFNAMSIZ - 1] = '\0'; + /* Remove a possible \n at the end of devname */ + strim(skb_realloc.devname); + + if (strnlen(skb_realloc.devname, IFNAMSIZ)) + skb_realloc.filtered = true; + + return count; +} + +static ssize_t devname_read(struct file *file, + char __user *buffer, + size_t size, loff_t *ppos) +{ + if (!skb_realloc.filtered) + return 0; + + return simple_read_from_buffer(buffer, size, ppos, &skb_realloc.devname, + strlen(skb_realloc.devname)); +} + +static const struct file_operations devname_ops = { + .write = devname_write, + .read = devname_read, +}; + +static int __init fail_skb_realloc_debugfs(void) +{ + umode_t mode = S_IFREG | 0600; + struct dentry *dir; + + dir = fault_create_debugfs_attr("fail_skb_realloc", NULL, + &skb_realloc.attr); + if (IS_ERR(dir)) + return PTR_ERR(dir); + + 
debugfs_create_file("devname", mode, dir, NULL, &devname_ops); + + return 0; +} + +late_initcall(fail_skb_realloc_debugfs); diff --git a/net/core/sock.c b/net/core/sock.c index 7f398bd07fb7..74729d20cd00 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1047,32 +1047,34 @@ static int sock_reserve_memory(struct sock *sk, int bytes) #ifdef CONFIG_PAGE_POOL -/* This is the number of tokens that the user can SO_DEVMEM_DONTNEED in - * 1 syscall. The limit exists to limit the amount of memory the kernel - * allocates to copy these tokens. +/* This is the number of tokens and frags that the user can SO_DEVMEM_DONTNEED + * in 1 syscall. The limit exists to limit the amount of memory the kernel + * allocates to copy these tokens, and to prevent looping over the frags for + * too long. */ #define MAX_DONTNEED_TOKENS 128 +#define MAX_DONTNEED_FRAGS 1024 static noinline_for_stack int sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen) { unsigned int num_tokens, i, j, k, netmem_num = 0; struct dmabuf_token *tokens; + int ret = 0, num_frags = 0; netmem_ref netmems[16]; - int ret = 0; if (!sk_is_tcp(sk)) return -EBADF; - if (optlen % sizeof(struct dmabuf_token) || + if (optlen % sizeof(*tokens) || optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS) return -EINVAL; - tokens = kvmalloc_array(optlen, sizeof(*tokens), GFP_KERNEL); + num_tokens = optlen / sizeof(*tokens); + tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL); if (!tokens) return -ENOMEM; - num_tokens = optlen / sizeof(struct dmabuf_token); if (copy_from_sockptr(tokens, optval, optlen)) { kvfree(tokens); return -EFAULT; @@ -1081,24 +1083,28 @@ sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen) xa_lock_bh(&sk->sk_user_frags); for (i = 0; i < num_tokens; i++) { for (j = 0; j < tokens[i].token_count; j++) { + if (++num_frags > MAX_DONTNEED_FRAGS) + goto frag_limit_reached; + netmem_ref netmem = (__force netmem_ref)__xa_erase( &sk->sk_user_frags, tokens[i].token_start + j); - if (netmem && - !WARN_ON_ONCE(!netmem_is_net_iov(netmem))) { - netmems[netmem_num++] = netmem; - if (netmem_num == ARRAY_SIZE(netmems)) { - xa_unlock_bh(&sk->sk_user_frags); - for (k = 0; k < netmem_num; k++) - WARN_ON_ONCE(!napi_pp_put_page(netmems[k])); - netmem_num = 0; - xa_lock_bh(&sk->sk_user_frags); - } - ret++; + if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem))) + continue; + + netmems[netmem_num++] = netmem; + if (netmem_num == ARRAY_SIZE(netmems)) { + xa_unlock_bh(&sk->sk_user_frags); + for (k = 0; k < netmem_num; k++) + WARN_ON_ONCE(!napi_pp_put_page(netmems[k])); + netmem_num = 0; + xa_lock_bh(&sk->sk_user_frags); } + ret++; } } +frag_limit_reached: xa_unlock_bh(&sk->sk_user_frags); for (k = 0; k < netmem_num; k++) WARN_ON_ONCE(!napi_pp_put_page(netmems[k])); diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index da5dba120bc9..d6649246188d 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -618,7 +618,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) by tcp. Feel free to propose better solution. 
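The new fail_skb_realloc injection point above is controlled through debugfs like the other fault_attr-based injectors. A hedged userspace sketch of configuring it (assuming debugfs is mounted at /sys/kernel/debug, CONFIG_FAIL_SKB_REALLOC is enabled, and "eth0" stands in for whatever device you want to target with the devname filter added here):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, val, strlen(val)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        const char *base = "/sys/kernel/debug/fail_skb_realloc";
        char path[128];

        snprintf(path, sizeof(path), "%s/probability", base);
        write_str(path, "10");     /* fail roughly 10% of candidate skbs */
        snprintf(path, sizeof(path), "%s/times", base);
        write_str(path, "-1");     /* keep injecting indefinitely */
        snprintf(path, sizeof(path), "%s/devname", base);
        write_str(path, "eth0\n"); /* only inject on this net device */
        return 0;
}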
--ANK (980728) */ - if (np->rxopt.all) + if (np->rxopt.all && sk->sk_state != DCCP_LISTEN) opt_skb = skb_clone_and_charge_r(skb, sk); if (sk->sk_state == DCCP_OPEN) { /* Fast path */ diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 0d62363dbd9d..05ce4f8080b3 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -538,6 +538,20 @@ static int ethtool_get_rxnfc_rule_count(struct net_device *dev) return info.rule_cnt; } +/* Max offset for one RSS context */ +static u32 ethtool_get_rss_ctx_max_channel(struct ethtool_rxfh_context *ctx) +{ + u32 max_ring = 0; + u32 i, *tbl; + + if (WARN_ON_ONCE(!ctx)) + return 0; + tbl = ethtool_rxfh_context_indir(ctx); + for (i = 0; i < ctx->indir_size; i++) + max_ring = max(max_ring, tbl[i]); + return max_ring; +} + static int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max) { const struct ethtool_ops *ops = dev->ethtool_ops; @@ -574,10 +588,18 @@ static int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max) if (rule_info.fs.ring_cookie != RX_CLS_FLOW_DISC && rule_info.fs.ring_cookie != RX_CLS_FLOW_WAKE && - !(rule_info.flow_type & FLOW_RSS) && - !ethtool_get_flow_spec_ring_vf(rule_info.fs.ring_cookie)) - max_ring = - max_t(u64, max_ring, rule_info.fs.ring_cookie); + !ethtool_get_flow_spec_ring_vf(rule_info.fs.ring_cookie)) { + u64 ring = rule_info.fs.ring_cookie; + + if (rule_info.flow_type & FLOW_RSS) { + struct ethtool_rxfh_context *ctx; + + ctx = xa_load(&dev->ethtool->rss_ctx, + rule_info.rss_context); + ring += ethtool_get_rss_ctx_max_channel(ctx); + } + max_ring = max_t(u64, max_ring, ring); + } } kvfree(info); @@ -589,6 +611,7 @@ err_free_info: return err; } +/* Max offset across all of a device's RSS contexts */ static u32 ethtool_get_max_rss_ctx_channel(struct net_device *dev) { struct ethtool_rxfh_context *ctx; @@ -596,13 +619,8 @@ static u32 ethtool_get_max_rss_ctx_channel(struct net_device *dev) u32 max_ring = 0; mutex_lock(&dev->ethtool->rss_lock); - xa_for_each(&dev->ethtool->rss_ctx, context, ctx) { - u32 i, *tbl; - - tbl = ethtool_rxfh_context_indir(ctx); - for (i = 0; i < ctx->indir_size; i++) - max_ring = max(max_ring, tbl[i]); - } + xa_for_each(&dev->ethtool->rss_ctx, context, ctx) + max_ring = max(max_ring, ethtool_get_rss_ctx_max_channel(ctx)); mutex_unlock(&dev->ethtool->rss_lock); return max_ring; @@ -611,7 +629,7 @@ static u32 ethtool_get_max_rss_ctx_channel(struct net_device *dev) static u32 ethtool_get_max_rxfh_channel(struct net_device *dev) { struct ethtool_rxfh_param rxfh = {}; - u32 dev_size, current_max; + u32 dev_size, current_max = 0; int ret; /* While we do track whether RSS context has an indirection diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 7da94e26ced6..d86399bcf223 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -992,6 +992,11 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, if (rc) return rc; + /* Nonzero ring with RSS only makes sense if NIC adds them together */ + if (info.flow_type & FLOW_RSS && !ops->cap_rss_rxnfc_adds && + ethtool_get_flow_spec_ring(info.fs.ring_cookie)) + return -EINVAL; + if (ops->get_rxfh) { struct ethtool_rxfh_param rxfh = {}; diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 0c9ce934b490..87bb36a5bdec 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -346,6 +346,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, int rpf, struct in_device *idev, u32 *itag) { struct net *net = dev_net(dev); + enum skb_drop_reason 
reason; struct flow_keys flkeys; int ret, no_addr; struct fib_result res; @@ -377,9 +378,15 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, if (fib_lookup(net, &fl4, &res, 0)) goto last_resort; - if (res.type != RTN_UNICAST && - (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev))) - goto e_inval; + if (res.type != RTN_UNICAST) { + if (res.type != RTN_LOCAL) { + reason = SKB_DROP_REASON_IP_INVALID_SOURCE; + goto e_inval; + } else if (!IN_DEV_ACCEPT_LOCAL(idev)) { + reason = SKB_DROP_REASON_IP_LOCAL_SOURCE; + goto e_inval; + } + } fib_combine_itag(itag, &res); dev_match = fib_info_nh_uses_dev(res.fi, dev); @@ -412,9 +419,9 @@ last_resort: return 0; e_inval: - return -EINVAL; + return -reason; e_rpf: - return -EXDEV; + return -SKB_DROP_REASON_IP_RPFILTER; } /* Ignore rp_filter for packets protected by IPsec. */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 33eec844a5a0..4f088fa1c2f2 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -545,7 +545,7 @@ static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, orefdst = skb_in->_skb_refdst; /* save old refdst */ skb_dst_set(skb_in, NULL); err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr, - dscp, rt2->dst.dev); + dscp, rt2->dst.dev) ? -EINVAL : 0; dst_release(&rt2->dst); rt2 = skb_rtable(skb_in); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 48e2810f1f27..07036a2943c1 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -132,12 +132,12 @@ static bool frag_expire_skip_icmp(u32 user) */ static void ip_expire(struct timer_list *t) { + enum skb_drop_reason reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT; struct inet_frag_queue *frag = from_timer(frag, t, timer); const struct iphdr *iph; struct sk_buff *head = NULL; struct net *net; struct ipq *qp; - int err; qp = container_of(frag, struct ipq, q); net = qp->q.fqdir->net; @@ -175,14 +175,15 @@ static void ip_expire(struct timer_list *t) /* skb has no dst, perform route lookup again */ iph = ip_hdr(head); - err = ip_route_input_noref(head, iph->daddr, iph->saddr, ip4h_dscp(iph), - head->dev); - if (err) + reason = ip_route_input_noref(head, iph->daddr, iph->saddr, + ip4h_dscp(iph), head->dev); + if (reason) goto out; /* Only an end host needs to send an ICMP * "Fragment Reassembly Timeout" message, per RFC792. 
*/ + reason = SKB_DROP_REASON_FRAG_REASM_TIMEOUT; if (frag_expire_skip_icmp(qp->q.key.v4.user) && (skb_rtable(head)->rt_type != RTN_LOCAL)) goto out; @@ -195,7 +196,7 @@ out: spin_unlock(&qp->q.lock); out_rcu_unlock: rcu_read_unlock(); - kfree_skb_reason(head, SKB_DROP_REASON_FRAG_REASM_TIMEOUT); + kfree_skb_reason(head, reason); ipq_put(qp); } diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 89bb63da6852..f0a4dda246ab 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -322,15 +322,14 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, int err, drop_reason; struct rtable *rt; - drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; - if (ip_can_use_hint(skb, iph, hint)) { - err = ip_route_use_hint(skb, iph->daddr, iph->saddr, - ip4h_dscp(iph), dev, hint); - if (unlikely(err)) + drop_reason = ip_route_use_hint(skb, iph->daddr, iph->saddr, + ip4h_dscp(iph), dev, hint); + if (unlikely(drop_reason)) goto drop_error; } + drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) && !skb_dst(skb) && !skb->sk && @@ -362,10 +361,11 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk, * how the packet travels inside Linux networking. */ if (!skb_valid_dst(skb)) { - err = ip_route_input_noref(skb, iph->daddr, iph->saddr, - ip4h_dscp(iph), dev); - if (unlikely(err)) + drop_reason = ip_route_input_noref(skb, iph->daddr, iph->saddr, + ip4h_dscp(iph), dev); + if (unlikely(drop_reason)) goto drop_error; + drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; } else { struct in_device *in_dev = __in_dev_get_rcu(dev); @@ -425,10 +425,8 @@ drop: return NET_RX_DROP; drop_error: - if (err == -EXDEV) { - drop_reason = SKB_DROP_REASON_IP_RPFILTER; + if (drop_reason == SKB_DROP_REASON_IP_RPFILTER) __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER); - } goto drop; } diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 81e86e5defee..e3321932bec0 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -618,7 +618,7 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev) orefdst = skb->_skb_refdst; skb_dst_set(skb, NULL); err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph), - dev); + dev) ? -EINVAL : 0; rt2 = skb_rtable(skb); if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { skb_dst_drop(skb); diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c index 271dc03fc6db..f0af12a2f70b 100644 --- a/net/ipv4/ipmr_base.c +++ b/net/ipv4/ipmr_base.c @@ -310,7 +310,8 @@ int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb, if (filter->filter_set) flags |= NLM_F_DUMP_FILTERED; - list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) { + list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list, + lockdep_rtnl_is_held()) { if (e < s_e) goto next_entry; if (filter->dev && diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ccdbe9c70132..e5603e84b20d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1678,50 +1678,54 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt) EXPORT_SYMBOL(rt_dst_clone); /* called in rcu_read_lock() section */ -int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, - struct in_device *in_dev, u32 *itag) +enum skb_drop_reason +ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + struct in_device *in_dev, u32 *itag) { - int err; + enum skb_drop_reason reason; /* Primary sanity checks. 
*/ if (!in_dev) - return -EINVAL; + return SKB_DROP_REASON_NOT_SPECIFIED; - if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || - skb->protocol != htons(ETH_P_IP)) - return -EINVAL; + if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) + return SKB_DROP_REASON_IP_INVALID_SOURCE; + + if (skb->protocol != htons(ETH_P_IP)) + return SKB_DROP_REASON_INVALID_PROTO; if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev)) - return -EINVAL; + return SKB_DROP_REASON_IP_LOCALNET; if (ipv4_is_zeronet(saddr)) { if (!ipv4_is_local_multicast(daddr) && ip_hdr(skb)->protocol != IPPROTO_IGMP) - return -EINVAL; + return SKB_DROP_REASON_IP_INVALID_SOURCE; } else { - err = fib_validate_source(skb, saddr, 0, dscp, 0, dev, in_dev, - itag); - if (err < 0) - return err; + reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0, + dev, in_dev, itag); + if (reason) + return reason; } - return 0; + return SKB_NOT_DROPPED_YET; } /* called in rcu_read_lock() section */ -static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, int our) +static enum skb_drop_reason +ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, int our) { struct in_device *in_dev = __in_dev_get_rcu(dev); unsigned int flags = RTCF_MULTICAST; + enum skb_drop_reason reason; struct rtable *rth; u32 itag = 0; - int err; - err = ip_mc_validate_source(skb, daddr, saddr, dscp, dev, in_dev, - &itag); - if (err) - return err; + reason = ip_mc_validate_source(skb, daddr, saddr, dscp, dev, in_dev, + &itag); + if (reason) + return reason; if (our) flags |= RTCF_LOCAL; @@ -1732,7 +1736,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST, false); if (!rth) - return -ENOBUFS; + return SKB_DROP_REASON_NOMEM; #ifdef CONFIG_IP_ROUTE_CLASSID rth->dst.tclassid = itag; @@ -1748,7 +1752,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, skb_dst_drop(skb); skb_dst_set(skb, &rth->dst); - return 0; + return SKB_NOT_DROPPED_YET; } @@ -1778,10 +1782,12 @@ static void ip_handle_martian_source(struct net_device *dev, } /* called in rcu_read_lock() section */ -static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, - struct in_device *in_dev, __be32 daddr, - __be32 saddr, dscp_t dscp) +static enum skb_drop_reason +__mkroute_input(struct sk_buff *skb, const struct fib_result *res, + struct in_device *in_dev, __be32 daddr, + __be32 saddr, dscp_t dscp) { + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct fib_nh_common *nhc = FIB_RES_NHC(*res); struct net_device *dev = nhc->nhc_dev; struct fib_nh_exception *fnhe; @@ -1795,12 +1801,13 @@ static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, out_dev = __in_dev_get_rcu(dev); if (!out_dev) { net_crit_ratelimited("Bug in ip_route_input_slow(). 
Please report.\n"); - return -EINVAL; + return reason; } err = fib_validate_source(skb, saddr, daddr, dscp, FIB_RES_OIF(*res), in_dev->dev, in_dev, &itag); if (err < 0) { + reason = -err; ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, saddr); @@ -1828,7 +1835,7 @@ static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, */ if (out_dev == in_dev && IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) { - err = -EINVAL; + reason = SKB_DROP_REASON_ARP_PVLAN_DISABLE; goto cleanup; } } @@ -1851,7 +1858,7 @@ static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, rth = rt_dst_alloc(out_dev->dev, 0, res->type, IN_DEV_ORCONF(out_dev, NOXFRM)); if (!rth) { - err = -ENOBUFS; + reason = SKB_DROP_REASON_NOMEM; goto cleanup; } @@ -1865,9 +1872,9 @@ static int __mkroute_input(struct sk_buff *skb, const struct fib_result *res, lwtunnel_set_redirect(&rth->dst); skb_dst_set(skb, &rth->dst); out: - err = 0; - cleanup: - return err; + reason = SKB_NOT_DROPPED_YET; +cleanup: + return reason; } #ifdef CONFIG_IP_ROUTE_MULTIPATH @@ -2125,9 +2132,10 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, } #endif /* CONFIG_IP_ROUTE_MULTIPATH */ -static int ip_mkroute_input(struct sk_buff *skb, struct fib_result *res, - struct in_device *in_dev, __be32 daddr, - __be32 saddr, dscp_t dscp, struct flow_keys *hkeys) +static enum skb_drop_reason +ip_mkroute_input(struct sk_buff *skb, struct fib_result *res, + struct in_device *in_dev, __be32 daddr, + __be32 saddr, dscp_t dscp, struct flow_keys *hkeys) { #ifdef CONFIG_IP_ROUTE_MULTIPATH if (res->fi && fib_info_num_path(res->fi) > 1) { @@ -2146,43 +2154,50 @@ static int ip_mkroute_input(struct sk_buff *skb, struct fib_result *res, * assuming daddr is valid and the destination is not a local broadcast one. * Uses the provided hint instead of performing a route lookup. 
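A compact userspace sketch (my illustration; the enum values are placeholders, not the real skb_drop_reason numbering) of the convention these routing hunks rely on: SKB_NOT_DROPPED_YET is zero, so a positive drop reason can be folded into an errno-style negative return by fib_validate_source() and recovered by negating it again, as __mkroute_input() does above with "reason = -err".

#include <assert.h>
#include <stdio.h>

enum skb_drop_reason {
        SKB_NOT_DROPPED_YET = 0,               /* success */
        SKB_DROP_REASON_IP_INVALID_SOURCE = 1, /* placeholder value */
        SKB_DROP_REASON_IP_RPFILTER = 2,       /* placeholder value */
};

/* Errno-style contract kept: 0 on success, negative encoded reason on failure. */
static int validate_source(int source_ok, enum skb_drop_reason fail_reason)
{
        return source_ok ? 0 : -(int)fail_reason;
}

int main(void)
{
        int err = validate_source(0, SKB_DROP_REASON_IP_RPFILTER);

        if (err < 0) {
                enum skb_drop_reason reason = -err; /* decode on the caller side */

                assert(reason == SKB_DROP_REASON_IP_RPFILTER);
                printf("packet dropped, reason %d\n", reason);
        }
        return 0;
}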
*/ -int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, - const struct sk_buff *hint) +enum skb_drop_reason +ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + const struct sk_buff *hint) { + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct in_device *in_dev = __in_dev_get_rcu(dev); struct rtable *rt = skb_rtable(hint); struct net *net = dev_net(dev); - int err = -EINVAL; u32 tag = 0; if (!in_dev) - return -EINVAL; + return reason; - if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) + if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) { + reason = SKB_DROP_REASON_IP_INVALID_SOURCE; goto martian_source; + } - if (ipv4_is_zeronet(saddr)) + if (ipv4_is_zeronet(saddr)) { + reason = SKB_DROP_REASON_IP_INVALID_SOURCE; goto martian_source; + } - if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) + if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) { + reason = SKB_DROP_REASON_IP_LOCALNET; goto martian_source; + } if (rt->rt_type != RTN_LOCAL) goto skip_validate_source; - err = fib_validate_source(skb, saddr, daddr, dscp, 0, dev, in_dev, - &tag); - if (err < 0) + reason = fib_validate_source_reason(skb, saddr, daddr, dscp, 0, dev, + in_dev, &tag); + if (reason) goto martian_source; skip_validate_source: skb_dst_copy(skb, hint); - return 0; + return SKB_NOT_DROPPED_YET; martian_source: ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); - return err; + return reason; } /* get device for dst_alloc with local routes */ @@ -2211,10 +2226,12 @@ static struct net_device *ip_rt_get_dev(struct net *net, * called with rcu_read_lock() */ -static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, - struct fib_result *res) +static enum skb_drop_reason +ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + struct fib_result *res) { + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct in_device *in_dev = __in_dev_get_rcu(dev); struct flow_keys *flkeys = NULL, _flkeys; struct net *net = dev_net(dev); @@ -2242,8 +2259,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.flowi4_tun_key.tun_id = 0; skb_dst_drop(skb); - if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) + if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr)) { + reason = SKB_DROP_REASON_IP_INVALID_SOURCE; goto martian_source; + } res->fi = NULL; res->table = NULL; @@ -2253,21 +2272,29 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, /* Accept zero addresses only to limited broadcast; * I even do not know to fix it or not. 
Waiting for complains :-) */ - if (ipv4_is_zeronet(saddr)) + if (ipv4_is_zeronet(saddr)) { + reason = SKB_DROP_REASON_IP_INVALID_SOURCE; goto martian_source; + } - if (ipv4_is_zeronet(daddr)) + if (ipv4_is_zeronet(daddr)) { + reason = SKB_DROP_REASON_IP_INVALID_DEST; goto martian_destination; + } /* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(), * and call it once if daddr or/and saddr are loopback addresses */ if (ipv4_is_loopback(daddr)) { - if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) + if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) { + reason = SKB_DROP_REASON_IP_LOCALNET; goto martian_destination; + } } else if (ipv4_is_loopback(saddr)) { - if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) + if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)) { + reason = SKB_DROP_REASON_IP_LOCALNET; goto martian_source; + } } /* @@ -2309,10 +2336,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, goto brd_input; } + err = -EINVAL; if (res->type == RTN_LOCAL) { - err = fib_validate_source(skb, saddr, daddr, dscp, 0, dev, - in_dev, &itag); - if (err < 0) + reason = fib_validate_source_reason(skb, saddr, daddr, dscp, + 0, dev, in_dev, &itag); + if (reason) goto martian_source; goto local_input; } @@ -2321,21 +2349,28 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, err = -EHOSTUNREACH; goto no_route; } - if (res->type != RTN_UNICAST) + if (res->type != RTN_UNICAST) { + reason = SKB_DROP_REASON_IP_INVALID_DEST; goto martian_destination; + } make_route: - err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, dscp, flkeys); -out: return err; + reason = ip_mkroute_input(skb, res, in_dev, daddr, saddr, dscp, + flkeys); + +out: + return reason; brd_input: - if (skb->protocol != htons(ETH_P_IP)) - goto e_inval; + if (skb->protocol != htons(ETH_P_IP)) { + reason = SKB_DROP_REASON_INVALID_PROTO; + goto out; + } if (!ipv4_is_zeronet(saddr)) { - err = fib_validate_source(skb, saddr, 0, dscp, 0, dev, in_dev, - &itag); - if (err < 0) + reason = fib_validate_source_reason(skb, saddr, 0, dscp, 0, + dev, in_dev, &itag); + if (reason) goto martian_source; } flags |= RTCF_BROADCAST; @@ -2353,7 +2388,7 @@ local_input: rth = rcu_dereference(nhc->nhc_rth_input); if (rt_cache_valid(rth)) { skb_dst_set_noref(skb, &rth->dst); - err = 0; + reason = SKB_NOT_DROPPED_YET; goto out; } } @@ -2390,7 +2425,7 @@ local_input: rt_add_uncached_list(rth); } skb_dst_set(skb, &rth->dst); - err = 0; + reason = SKB_NOT_DROPPED_YET; goto out; no_route: @@ -2411,12 +2446,8 @@ martian_destination: &daddr, &saddr, dev->name); #endif -e_inval: - err = -EINVAL; - goto out; - e_nobufs: - err = -ENOBUFS; + reason = SKB_DROP_REASON_NOMEM; goto out; martian_source: @@ -2425,9 +2456,10 @@ martian_source: } /* called with rcu_read_lock held */ -static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev, - struct fib_result *res) +static enum skb_drop_reason +ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + struct fib_result *res) { /* Multicast recognition logic is moved from route cache to here. * The problem was that too many Ethernet cards have broken/missing @@ -2441,12 +2473,12 @@ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, * route cache entry is created eventually. 
*/ if (ipv4_is_multicast(daddr)) { + enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct in_device *in_dev = __in_dev_get_rcu(dev); int our = 0; - int err = -EINVAL; if (!in_dev) - return err; + return -EINVAL; our = ip_check_mc_rcu(in_dev, daddr, saddr, ip_hdr(skb)->protocol); @@ -2467,26 +2499,27 @@ static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr, IN_DEV_MFORWARD(in_dev)) #endif ) { - err = ip_route_input_mc(skb, daddr, saddr, dscp, dev, - our); + reason = ip_route_input_mc(skb, daddr, saddr, dscp, + dev, our); } - return err; + return reason; } return ip_route_input_slow(skb, daddr, saddr, dscp, dev, res); } -int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, - dscp_t dscp, struct net_device *dev) +enum skb_drop_reason ip_route_input_noref(struct sk_buff *skb, __be32 daddr, + __be32 saddr, dscp_t dscp, + struct net_device *dev) { + enum skb_drop_reason reason; struct fib_result res; - int err; rcu_read_lock(); - err = ip_route_input_rcu(skb, daddr, saddr, dscp, dev, &res); + reason = ip_route_input_rcu(skb, daddr, saddr, dscp, dev, &res); rcu_read_unlock(); - return err; + return reason; } EXPORT_SYMBOL(ip_route_input_noref); @@ -3298,7 +3331,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, skb->mark = mark; err = ip_route_input_rcu(skb, dst, src, inet_dsfield_to_dscp(rtm->rtm_tos), - dev, &res); + dev, &res) ? -EINVAL : 0; rt = skb_rtable(skb); if (err == 0 && rt->dst.error) diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index c74705ead984..ac1dbd492c22 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -954,10 +954,10 @@ static int input_action_end_dx4_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *orig_dst = skb_dst(skb); + enum skb_drop_reason reason; struct seg6_local_lwt *slwt; struct iphdr *iph; __be32 nhaddr; - int err; slwt = seg6_local_lwtunnel(orig_dst->lwtstate); @@ -967,9 +967,9 @@ static int input_action_end_dx4_finish(struct net *net, struct sock *sk, skb_dst_drop(skb); - err = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev); - if (err) { - kfree_skb(skb); + reason = ip_route_input(skb, nhaddr, iph->saddr, 0, skb->dev); + if (reason) { + kfree_skb_reason(skb, reason); return -EINVAL; } @@ -1174,8 +1174,8 @@ drop: static int input_action_end_dt4(struct sk_buff *skb, struct seg6_local_lwt *slwt) { + enum skb_drop_reason reason; struct iphdr *iph; - int err; if (!decap_and_validate(skb, IPPROTO_IPIP)) goto drop; @@ -1193,8 +1193,8 @@ static int input_action_end_dt4(struct sk_buff *skb, iph = ip_hdr(skb); - err = ip_route_input(skb, iph->daddr, iph->saddr, 0, skb->dev); - if (unlikely(err)) + reason = ip_route_input(skb, iph->daddr, iph->saddr, 0, skb->dev); + if (unlikely(reason)) goto drop; return dst_input(skb); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c748eeae1453..2debdf085a3b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1621,7 +1621,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) by tcp. Feel free to propose better solution. 
--ANK (980728) */ - if (np->rxopt.all) + if (np->rxopt.all && sk->sk_state != TCP_LISTEN) opt_skb = skb_clone_and_charge_r(skb, sk); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ @@ -1659,8 +1659,6 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) if (reason) goto reset; } - if (opt_skb) - __kfree_skb(opt_skb); return 0; } } else diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index fe7eab4b681b..f3fbe5a4395e 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -170,28 +170,63 @@ static void sta_rx_agg_reorder_timer_expired(struct timer_list *t) rcu_read_unlock(); } -static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata, - struct sk_buff *skb, - const struct ieee80211_addba_ext_ie *req, - u16 buf_size) +void ieee80211_add_addbaext(struct sk_buff *skb, + const u8 req_addba_ext_data, + u16 buf_size) { - struct ieee80211_addba_ext_ie *resp; + struct ieee80211_addba_ext_ie *addba_ext; u8 *pos; pos = skb_put_zero(skb, 2 + sizeof(struct ieee80211_addba_ext_ie)); *pos++ = WLAN_EID_ADDBA_EXT; *pos++ = sizeof(struct ieee80211_addba_ext_ie); - resp = (struct ieee80211_addba_ext_ie *)pos; - resp->data = req->data & IEEE80211_ADDBA_EXT_NO_FRAG; + addba_ext = (struct ieee80211_addba_ext_ie *)pos; - resp->data |= u8_encode_bits(buf_size >> IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT, - IEEE80211_ADDBA_EXT_BUF_SIZE_MASK); + addba_ext->data = IEEE80211_ADDBA_EXT_NO_FRAG; + if (req_addba_ext_data) + addba_ext->data &= req_addba_ext_data; + + addba_ext->data |= + u8_encode_bits(buf_size >> IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT, + IEEE80211_ADDBA_EXT_BUF_SIZE_MASK); +} + +u8 ieee80211_retrieve_addba_ext_data(struct sta_info *sta, + const void *elem_data, ssize_t elem_len, + u16 *buf_size) +{ + struct ieee802_11_elems *elems; + u8 buf_size_1k, data = 0; + + if (!sta->sta.deflink.he_cap.has_he) + return 0; + + if (elem_len <= 0) + return 0; + + elems = ieee802_11_parse_elems(elem_data, elem_len, true, NULL); + + if (elems && !elems->parse_error && elems->addba_ext_ie) { + data = elems->addba_ext_ie->data; + + if (!sta->sta.deflink.eht_cap.has_eht || !buf_size) + goto free; + + buf_size_1k = u8_get_bits(elems->addba_ext_ie->data, + IEEE80211_ADDBA_EXT_BUF_SIZE_MASK); + *buf_size |= (u16)buf_size_1k << + IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT; + } +free: + kfree(elems); + + return data; } static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid, u8 dialog_token, u16 status, u16 policy, u16 buf_size, u16 timeout, - const struct ieee80211_addba_ext_ie *addbaext) + const u8 req_addba_ext_data) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; @@ -223,8 +258,8 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid, mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); - if (sta->sta.deflink.he_cap.has_he && addbaext) - ieee80211_add_addbaext(sdata, skb, addbaext, buf_size); + if (sta->sta.deflink.he_cap.has_he) + ieee80211_add_addbaext(skb, req_addba_ext_data, buf_size); ieee80211_tx_skb(sdata, skb); } @@ -233,7 +268,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, u16 buf_size, bool tx, bool auto_seq, - const struct ieee80211_addba_ext_ie *addbaext) + const u8 addba_ext_data) { struct ieee80211_local *local = sta->sdata->local; struct tid_ampdu_rx *tid_agg_rx; @@ -419,7 +454,7 @@ end: if (tx) ieee80211_send_addba_resp(sta, 
sta->sta.addr, tid, dialog_token, status, 1, buf_size, - timeout, addbaext); + timeout, addba_ext_data); } void ieee80211_process_addba_request(struct ieee80211_local *local, @@ -428,9 +463,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, size_t len) { u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num; - struct ieee802_11_elems *elems = NULL; - u8 dialog_token; - int ies_len; + u8 dialog_token, addba_ext_data; /* extract session parameters from addba request frame */ dialog_token = mgmt->u.action.u.addba_req.dialog_token; @@ -443,28 +476,17 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; - ies_len = len - offsetof(struct ieee80211_mgmt, - u.action.u.addba_req.variable); - if (ies_len) { - elems = ieee802_11_parse_elems(mgmt->u.action.u.addba_req.variable, - ies_len, true, NULL); - if (!elems || elems->parse_error) - goto free; - } - - if (sta->sta.deflink.eht_cap.has_eht && elems && elems->addba_ext_ie) { - u8 buf_size_1k = u8_get_bits(elems->addba_ext_ie->data, - IEEE80211_ADDBA_EXT_BUF_SIZE_MASK); - - buf_size |= buf_size_1k << IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT; - } + addba_ext_data = + ieee80211_retrieve_addba_ext_data(sta, + mgmt->u.action.u.addba_req.variable, + len - + offsetof(typeof(*mgmt), + u.action.u.addba_req.variable), + &buf_size); __ieee80211_start_rx_ba_session(sta, dialog_token, timeout, start_seq_num, ba_policy, tid, - buf_size, true, false, - elems ? elems->addba_ext_ie : NULL); -free: - kfree(elems); + buf_size, true, false, addba_ext_data); } void ieee80211_manage_rx_ba_offl(struct ieee80211_vif *vif, diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 04cb45cfb310..61f2cac37728 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -58,23 +58,24 @@ * complete. 
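A rough userspace sketch (my illustration; the mask and shift below are placeholder constants, not the ieee80211.h definitions) of the buffer-size split that ieee80211_retrieve_addba_ext_data() reassembles above: the ADDBA parameter set carries a 10-bit buffer size, and for EHT stations the ADDBA extension element supplies the bits above it, such as the 1024-frame aggregates EHT allows.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ADDBA_PARAM_BUF_SIZE_MASK 0x3ff /* low 10 bits from the ADDBA params */
#define ADDBA_EXT_BUF_SIZE_SHIFT  10    /* extension bits sit above bit 9 */

static uint16_t combine_buf_size(uint16_t param_buf_size, uint8_t ext_buf_size_1k)
{
        uint16_t buf_size = param_buf_size & ADDBA_PARAM_BUF_SIZE_MASK;

        return buf_size | ((uint16_t)ext_buf_size_1k << ADDBA_EXT_BUF_SIZE_SHIFT);
}

int main(void)
{
        /* A 1024-MPDU EHT agreement: the params carry 0, the extension carries 1. */
        assert(combine_buf_size(0, 1) == 1024);
        printf("combined buffer size: %u\n", combine_buf_size(0, 1));
        return 0;
}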
*/ -static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, - const u8 *da, u16 tid, +static void ieee80211_send_addba_request(struct sta_info *sta, u16 tid, u8 dialog_token, u16 start_seq_num, u16 agg_size, u16 timeout) { + struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; u16 capab; - skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); - + skb = dev_alloc_skb(sizeof(*mgmt) + + 2 + sizeof(struct ieee80211_addba_ext_ie) + + local->hw.extra_tx_headroom); if (!skb) return; skb_reserve(skb, local->hw.extra_tx_headroom); - mgmt = ieee80211_mgmt_ba(skb, da, sdata); + mgmt = ieee80211_mgmt_ba(skb, sta->sta.addr, sdata); skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); @@ -93,6 +94,9 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, mgmt->u.action.u.addba_req.start_seq_num = cpu_to_le16(start_seq_num << 4); + if (sta->sta.deflink.he_cap.has_he) + ieee80211_add_addbaext(skb, 0, agg_size); + ieee80211_tx_skb_tid(sdata, skb, tid, -1); } @@ -460,8 +464,11 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta, sta->ampdu_mlme.addba_req_num[tid]++; spin_unlock_bh(&sta->lock); - if (sta->sta.deflink.he_cap.has_he) { + if (sta->sta.deflink.eht_cap.has_eht) { buf_size = local->hw.max_tx_aggregation_subframes; + } else if (sta->sta.deflink.he_cap.has_he) { + buf_size = min_t(u16, local->hw.max_tx_aggregation_subframes, + IEEE80211_MAX_AMPDU_BUF_HE); } else { /* * We really should use what the driver told us it will @@ -473,9 +480,8 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta, } /* send AddBA request */ - ieee80211_send_addba_request(sdata, sta->sta.addr, tid, - tid_tx->dialog_token, tid_tx->ssn, - buf_size, tid_tx->timeout); + ieee80211_send_addba_request(sta, tid, tid_tx->dialog_token, + tid_tx->ssn, buf_size, tid_tx->timeout); WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)); } @@ -970,6 +976,13 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; tid = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_TID_MASK); buf_size = u16_get_bits(capab, IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK); + + ieee80211_retrieve_addba_ext_data(sta, + mgmt->u.action.u.addba_resp.variable, + len - offsetof(typeof(*mgmt), + u.action.u.addba_resp.variable), + &buf_size); + buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes); txq = sta->sta.txq[tid]; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index f11d7ed9e0e5..61a824ec33da 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -143,7 +143,7 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata, } static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata, - struct cfg80211_mbssid_config params, + struct cfg80211_mbssid_config *params, struct ieee80211_bss_conf *link_conf) { struct ieee80211_sub_if_data *tx_sdata; @@ -154,10 +154,10 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata, link_conf->ema_ap = false; link_conf->bssid_indicator = 0; - if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev) + if (sdata->vif.type != NL80211_IFTYPE_AP || !params->tx_wdev) return -EINVAL; - tx_sdata = IEEE80211_WDEV_TO_SUB_IF(params.tx_wdev); + tx_sdata = IEEE80211_WDEV_TO_SUB_IF(params->tx_wdev); if (!tx_sdata) return -EINVAL; @@ -166,9 +166,9 @@ static int ieee80211_set_ap_mbssid_options(struct 
ieee80211_sub_if_data *sdata, } else { sdata->vif.mbssid_tx_vif = &tx_sdata->vif; link_conf->nontransmitted = true; - link_conf->bssid_index = params.index; + link_conf->bssid_index = params->index; } - if (params.ema) + if (params->ema) link_conf->ema_ap = true; return 0; @@ -1414,7 +1414,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, if (sdata->vif.type == NL80211_IFTYPE_AP && params->mbssid_config.tx_wdev) { err = ieee80211_set_ap_mbssid_options(sdata, - params->mbssid_config, + &params->mbssid_config, link_conf); if (err) return err; diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 1c2b7dd8976a..32390d8a9d75 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -379,7 +379,7 @@ void ieee80211_ba_session_work(struct wiphy *wiphy, struct wiphy_work *work) sta->ampdu_mlme.tid_rx_manage_offl)) __ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid, IEEE80211_MAX_AMPDU_BUF_HT, - false, true, NULL); + false, true, 0); if (test_and_clear_bit(tid + IEEE80211_NUM_TIDS, sta->ampdu_mlme.tid_rx_manage_offl)) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a3ad2d41c694..a00096dd787b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2113,14 +2113,19 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, const u8 *bssid, int link_id); bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old, enum ieee80211_smps_mode smps_mode_new); - +void ieee80211_add_addbaext(struct sk_buff *skb, + const u8 req_addba_ext_data, + u16 buf_size); +u8 ieee80211_retrieve_addba_ext_data(struct sta_info *sta, + const void *elem_data, ssize_t elem_len, + u16 *buf_size); void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool stop); void __ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, u16 buf_size, bool tx, bool auto_seq, - const struct ieee80211_addba_ext_ie *addbaext); + const u8 addba_ext_data); void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, enum ieee80211_agg_stop_reason reason); void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 618289aac0ab..758a0dbfcf78 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -524,7 +524,8 @@ __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info) { struct mptcp_pm_addr_entry *entry; - list_for_each_entry(entry, &pernet->local_addr_list, list) { + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list, + lockdep_is_held(&pernet->lock)) { if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) return entry; } diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c index 56dfea9862b7..e35178f5205f 100644 --- a/net/mptcp/pm_userspace.c +++ b/net/mptcp/pm_userspace.c @@ -308,14 +308,17 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info) lock_sock(sk); + spin_lock_bh(&msk->pm.lock); match = mptcp_userspace_pm_lookup_addr_by_id(msk, id_val); if (!match) { GENL_SET_ERR_MSG(info, "address with specified id not found"); + spin_unlock_bh(&msk->pm.lock); release_sock(sk); goto out; } list_move(&match->list, &free_list); + spin_unlock_bh(&msk->pm.lock); mptcp_pm_remove_addrs(msk, &free_list); @@ -560,6 +563,7 @@ int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info) struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; struct nlattr *attr =
info->attrs[MPTCP_PM_ATTR_ADDR]; struct net *net = sock_net(skb->sk); + struct mptcp_pm_addr_entry *entry; struct mptcp_sock *msk; int ret = -EINVAL; struct sock *sk; @@ -601,6 +605,17 @@ int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info) if (loc.flags & MPTCP_PM_ADDR_FLAG_BACKUP) bkup = 1; + spin_lock_bh(&msk->pm.lock); + list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) { + if (mptcp_addresses_equal(&entry->addr, &loc.addr, false)) { + if (bkup) + entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP; + else + entry->flags &= ~MPTCP_PM_ADDR_FLAG_BACKUP; + } + } + spin_unlock_bh(&msk->pm.lock); + lock_sock(sk); ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc.addr, &rem.addr, bkup); release_sock(sk); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index b0e9a745ea62..08a72242428c 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2082,7 +2082,8 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) slow = lock_sock_fast(ssk); WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp); - tcp_cleanup_rbuf(ssk, 1); + if (tcp_can_send_ack(ssk)) + tcp_cleanup_rbuf(ssk, 1); unlock_sock_fast(ssk, slow); } } @@ -2205,7 +2206,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, cmsg_flags = MPTCP_CMSG_INQ; while (copied < len) { - int bytes_read; + int err, bytes_read; bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags); if (unlikely(bytes_read < 0)) { @@ -2267,9 +2268,16 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } pr_debug("block timeout %ld\n", timeo); - sk_wait_data(sk, &timeo, NULL); + mptcp_rcv_space_adjust(msk, copied); + err = sk_wait_data(sk, &timeo, NULL); + if (err < 0) { + err = copied ? 
: err; + goto out_err; + } } + mptcp_rcv_space_adjust(msk, copied); + out_err: if (cmsg_flags && copied >= 0) { if (cmsg_flags & MPTCP_CMSG_TS) @@ -2285,8 +2293,6 @@ out_err: pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n", msk, skb_queue_empty_lockless(&sk->sk_receive_queue), skb_queue_empty(&msk->receive_queue), copied); - if (!(flags & MSG_PEEK)) - mptcp_rcv_space_adjust(msk, copied); release_sock(sk); return copied; @@ -2722,8 +2728,8 @@ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp) return; - close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + - mptcp_close_timeout(sk); + close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp - + tcp_jiffies32 + jiffies + mptcp_close_timeout(sk); /* the close timeout takes precedence on the fail one, and here at least one of * them is active diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 52a7c7233cab..2ea4763a2004 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -393,15 +393,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) static void netlink_sock_destruct(struct sock *sk) { - struct netlink_sock *nlk = nlk_sk(sk); - - if (nlk->cb_running) { - if (nlk->cb.done) - nlk->cb.done(&nlk->cb); - module_put(nlk->cb.module); - kfree_skb(nlk->cb.skb); - } - skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { @@ -414,14 +405,6 @@ static void netlink_sock_destruct(struct sock *sk) WARN_ON(nlk_sk(sk)->groups); } -static void netlink_sock_destruct_work(struct work_struct *work) -{ - struct netlink_sock *nlk = container_of(work, struct netlink_sock, - work); - - sk_free(&nlk->sk); -} - /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on * SMP. Look, when several writers sleep and reader wakes them up, all but one * immediately hit write lock and grab all the cpus. 
Exclusive sleep solves @@ -731,12 +714,6 @@ static void deferred_put_nlk_sk(struct rcu_head *head) if (!refcount_dec_and_test(&sk->sk_refcnt)) return; - if (nlk->cb_running && nlk->cb.done) { - INIT_WORK(&nlk->work, netlink_sock_destruct_work); - schedule_work(&nlk->work); - return; - } - sk_free(sk); } @@ -788,6 +765,14 @@ static int netlink_release(struct socket *sock) NETLINK_URELEASE, &n); } + /* Terminate any outstanding dump */ + if (nlk->cb_running) { + if (nlk->cb.done) + nlk->cb.done(&nlk->cb); + module_put(nlk->cb.module); + kfree_skb(nlk->cb.skb); + } + module_put(nlk->module); if (netlink_is_kernel(sk)) { diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 5b0e4e62ab8b..778a3809361f 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -4,7 +4,6 @@ #include <linux/rhashtable.h> #include <linux/atomic.h> -#include <linux/workqueue.h> #include <net/sock.h> /* flags */ @@ -50,7 +49,6 @@ struct netlink_sock { struct rhash_head node; struct rcu_head rcu; - struct work_struct work; }; static inline struct netlink_sock *nlk_sk(struct sock *sk) diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index c268c2b011f4..a8e21060112f 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c @@ -32,8 +32,12 @@ static int rfkill_gpio_set_power(void *data, bool blocked) { struct rfkill_gpio_data *rfkill = data; - if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled) - clk_enable(rfkill->clk); + if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled) { + int ret = clk_enable(rfkill->clk); + + if (ret) + return ret; + } gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked); gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 04942f8c62e0..7578e27260c9 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1933,7 +1933,8 @@ static void tcf_chain_tp_remove(struct tcf_chain *chain, static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, struct tcf_chain_info *chain_info, u32 protocol, u32 prio, - bool prio_allocate); + bool prio_allocate, + struct netlink_ext_ack *extack); /* Try to insert new proto. 
* If proto with specified priority already exists, free new proto @@ -1957,8 +1958,7 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain, return ERR_PTR(-EAGAIN); } - tp = tcf_chain_tp_find(chain, &chain_info, - protocol, prio, false); + tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, NULL); if (!tp) err = tcf_chain_tp_insert(chain, &chain_info, tp_new); mutex_unlock(&chain->filter_chain_lock); @@ -2018,7 +2018,8 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, struct tcf_chain_info *chain_info, u32 protocol, u32 prio, - bool prio_allocate) + bool prio_allocate, + struct netlink_ext_ack *extack) { struct tcf_proto **pprev; struct tcf_proto *tp; @@ -2029,9 +2030,14 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, pprev = &tp->next) { if (tp->prio >= prio) { if (tp->prio == prio) { - if (prio_allocate || - (tp->protocol != protocol && protocol)) + if (prio_allocate) { + NL_SET_ERR_MSG(extack, "Lowest ID from auto-alloc range already in use"); + return ERR_PTR(-ENOSPC); + } + if (tp->protocol != protocol && protocol) { + NL_SET_ERR_MSG(extack, "Protocol mismatch for filter with specified priority"); return ERR_PTR(-EINVAL); + } } else { tp = NULL; } @@ -2312,9 +2318,8 @@ replay: mutex_lock(&chain->filter_chain_lock); tp = tcf_chain_tp_find(chain, &chain_info, protocol, - prio, prio_allocate); + prio, prio_allocate, extack); if (IS_ERR(tp)) { - NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); err = PTR_ERR(tp); goto errout_locked; } @@ -2539,10 +2544,13 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, mutex_lock(&chain->filter_chain_lock); tp = tcf_chain_tp_find(chain, &chain_info, protocol, - prio, false); - if (!tp || IS_ERR(tp)) { + prio, false, extack); + if (!tp) { + err = -ENOENT; NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); - err = tp ? PTR_ERR(tp) : -ENOENT; + goto errout_locked; + } else if (IS_ERR(tp)) { + err = PTR_ERR(tp); goto errout_locked; } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); @@ -2679,11 +2687,14 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, mutex_lock(&chain->filter_chain_lock); tp = tcf_chain_tp_find(chain, &chain_info, protocol, - prio, false); + prio, false, extack); mutex_unlock(&chain->filter_chain_lock); - if (!tp || IS_ERR(tp)) { + if (!tp) { + err = -ENOENT; NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); - err = tp ? PTR_ERR(tp) : -ENOENT; + goto errout; + } else if (IS_ERR(tp)) { + err = PTR_ERR(tp); goto errout; } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 9412d88a99bc..d3a03c57545b 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -92,6 +92,16 @@ struct tc_u_common { long knodes; }; +static u32 handle2id(u32 h) +{ + return ((h & 0x80000000) ? 
((h >> 20) & 0x7FF) : h); +} + +static u32 id2handle(u32 id) +{ + return (id | 0x800U) << 20; +} + static inline unsigned int u32_hash_fold(__be32 key, const struct tc_u32_sel *sel, u8 fshift) @@ -310,7 +320,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr) int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL); if (id < 0) return 0; - return (id | 0x800U) << 20; + return id2handle(id); } static struct hlist_head *tc_u_common_hash; @@ -360,7 +370,7 @@ static int u32_init(struct tcf_proto *tp) return -ENOBUFS; refcount_set(&root_ht->refcnt, 1); - root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000; + root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : id2handle(0); root_ht->prio = tp->prio; root_ht->is_root = true; idr_init(&root_ht->handle_idr); @@ -612,7 +622,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, if (phn == ht) { u32_clear_hw_hnode(tp, ht, extack); idr_destroy(&ht->handle_idr); - idr_remove(&tp_c->handle_idr, ht->handle); + idr_remove(&tp_c->handle_idr, handle2id(ht->handle)); RCU_INIT_POINTER(*hn, ht->next); kfree_rcu(ht, rcu); return 0; @@ -989,7 +999,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, err = u32_replace_hw_hnode(tp, ht, userflags, extack); if (err) { - idr_remove(&tp_c->handle_idr, handle); + idr_remove(&tp_c->handle_idr, handle2id(handle)); kfree(ht); return err; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index b96c849545ae..a9ed2ccab1bd 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -683,7 +683,7 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) struct sock *sk = &sp->inet.sk; struct net *net = sock_net(sk); struct net_device *dev = NULL; - int type; + int type, res, bound_dev_if; type = ipv6_addr_type(in6); if (IPV6_ADDR_ANY == type) @@ -697,14 +697,21 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) if (!(type & IPV6_ADDR_UNICAST)) return 0; - if (sk->sk_bound_dev_if) { - dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); + rcu_read_lock(); + bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); + if (bound_dev_if) { + res = 0; + dev = dev_get_by_index_rcu(net, bound_dev_if); if (!dev) - return 0; + goto out; } - return ipv6_can_nonlocal_bind(net, &sp->inet) || - ipv6_chk_addr(net, in6, dev, 0); + res = ipv6_can_nonlocal_bind(net, &sp->inet) || + ipv6_chk_addr(net, in6, dev, 0); + +out: + rcu_read_unlock(); + return res; } /* This function checks if the address is a valid address to be used for diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 109b7a0bd071..25b28b1434f5 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -836,6 +836,9 @@ static void vsock_sk_destruct(struct sock *sk) { struct vsock_sock *vsk = vsock_sk(sk); + /* Flush MSG_ZEROCOPY leftovers. 
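As a sanity check on the cls_u32 change above, a standalone sketch (mine, not part of the patch) showing that the two new helpers invert each other: id2handle() puts the IDR id in bits 20-30 of the handle and forces bit 31 on, and handle2id() strips that encoding again so the right id is removed from the IDR.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t handle2id(uint32_t h)
{
        return (h & 0x80000000) ? ((h >> 20) & 0x7FF) : h;
}

static uint32_t id2handle(uint32_t id)
{
        return (id | 0x800U) << 20;
}

int main(void)
{
        uint32_t id;

        /* gen_new_htid() allocates ids below 0x7FF; all of them round-trip. */
        for (id = 0; id < 0x7FF; id++)
                assert(handle2id(id2handle(id)) == id);
        printf("id 1 -> handle 0x%08x -> id %u\n",
               id2handle(1), handle2id(id2handle(1)));
        return 0;
}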
*/ + __skb_queue_purge(&sk->sk_error_queue); + vsock_deassign_transport(vsk); /* When clearing these addresses, there's no need to set the family and diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index ccbd2bc0d210..9acc13ab3f82 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -400,6 +400,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, if (virtio_transport_init_zcopy_skb(vsk, skb, info->msg, can_zcopy)) { + kfree_skb(skb); ret = -ENOMEM; break; } @@ -1109,6 +1110,7 @@ void virtio_transport_destruct(struct vsock_sock *vsk) struct virtio_vsock_sock *vvs = vsk->trans; kfree(vvs); + vsk->trans = NULL; } EXPORT_SYMBOL_GPL(virtio_transport_destruct); @@ -1512,6 +1514,14 @@ virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb, return -ENOMEM; } + /* __vsock_release() might have already flushed accept_queue. + * Subsequent enqueues would lead to a memory leak. + */ + if (sk->sk_shutdown == SHUTDOWN_MASK) { + virtio_transport_reset_no_sock(t, skb); + return -ESHUTDOWN; + } + child = vsock_create_connected(sk); if (!child) { virtio_transport_reset_no_sock(t, skb); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 4dac81854721..a5eb92d93074 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -340,12 +340,6 @@ cfg80211_mlme_check_mlo_compat(const struct ieee80211_multi_link_elem *mle_a, return -EINVAL; } - if (ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_a) != - ieee80211_mle_get_eml_med_sync_delay((const u8 *)mle_b)) { - NL_SET_ERR_MSG(extack, "link EML medium sync delay mismatch"); - return -EINVAL; - } - if (ieee80211_mle_get_eml_cap((const u8 *)mle_a) != ieee80211_mle_get_eml_cap((const u8 *)mle_b)) { NL_SET_ERR_MSG(extack, "link EML capabilities mismatch"); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8cc9b968dbd8..9d2edb71f981 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6118,6 +6118,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) !info->attrs[NL80211_ATTR_BEACON_HEAD]) return -EINVAL; + if (info->attrs[NL80211_ATTR_SMPS_MODE] && + nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]) != NL80211_SMPS_OFF) + return -EOPNOTSUPP; + params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) return -ENOMEM; @@ -6267,10 +6271,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) goto out; } - if (info->attrs[NL80211_ATTR_SMPS_MODE] && - nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]) != NL80211_SMPS_OFF) - return -EOPNOTSUPP; - params->pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); if (params->pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) { err = -EOPNOTSUPP; @@ -9795,6 +9795,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev, request = kzalloc(size, GFP_KERNEL); if (!request) return ERR_PTR(-ENOMEM); + request->n_channels = n_channels; if (n_ssids) request->ssids = (void *)request + diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c index f847e832ba14..57565dfd74a2 100644 --- a/samples/landlock/sandboxer.c +++ b/samples/landlock/sandboxer.c @@ -60,6 +60,25 @@ static inline int landlock_restrict_self(const int ruleset_fd, #define ENV_SCOPED_NAME "LL_SCOPED" #define ENV_DELIMITER ":" +static int str2num(const char *numstr, __u64 *num_dst) +{ + char *endptr = NULL; + int err = 0; + __u64 num; + + errno = 0; + num = strtoull(numstr, &endptr, 10); + if (errno != 0) + err = errno; + /* Was the 
string empty, or not entirely parsed successfully? */ + else if ((*numstr == '\0') || (*endptr != '\0')) + err = EINVAL; + else + *num_dst = num; + + return err; +} + static int parse_path(char *env_path, const char ***const path_list) { int i, num_paths = 0; @@ -160,7 +179,6 @@ static int populate_ruleset_net(const char *const env_var, const int ruleset_fd, char *env_port_name, *env_port_name_next, *strport; struct landlock_net_port_attr net_port = { .allowed_access = allowed_access, - .port = 0, }; env_port_name = getenv(env_var); @@ -171,7 +189,17 @@ static int populate_ruleset_net(const char *const env_var, const int ruleset_fd, env_port_name_next = env_port_name; while ((strport = strsep(&env_port_name_next, ENV_DELIMITER))) { - net_port.port = atoi(strport); + __u64 port; + + if (strcmp(strport, "") == 0) + continue; + + if (str2num(strport, &port)) { + fprintf(stderr, "Failed to parse port at \"%s\"\n", + strport); + goto out_free_name; + } + net_port.port = port; if (landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT, &net_port, 0)) { fprintf(stderr, @@ -262,6 +290,44 @@ out_unset: #define LANDLOCK_ABI_LAST 6 +#define XSTR(s) #s +#define STR(s) XSTR(s) + +/* clang-format off */ + +static const char help[] = + "usage: " ENV_FS_RO_NAME "=\"...\" " ENV_FS_RW_NAME "=\"...\" " + "[other environment variables] %1$s <cmd> [args]...\n" + "\n" + "Execute the given command in a restricted environment.\n" + "Multi-valued settings (lists of ports, paths, scopes) are colon-delimited.\n" + "\n" + "Mandatory settings:\n" + "* " ENV_FS_RO_NAME ": paths allowed to be used in a read-only way\n" + "* " ENV_FS_RW_NAME ": paths allowed to be used in a read-write way\n" + "\n" + "Optional settings (when not set, their associated access check " + "is always allowed, which is different from an empty string which " + "means an empty list):\n" + "* " ENV_TCP_BIND_NAME ": ports allowed to bind (server)\n" + "* " ENV_TCP_CONNECT_NAME ": ports allowed to connect (client)\n" + "* " ENV_SCOPED_NAME ": actions denied on the outside of the landlock domain\n" + " - \"a\" to restrict opening abstract unix sockets\n" + " - \"s\" to restrict sending signals\n" + "\n" + "Example:\n" + ENV_FS_RO_NAME "=\"${PATH}:/lib:/usr:/proc:/etc:/dev/urandom\" " + ENV_FS_RW_NAME "=\"/dev/null:/dev/full:/dev/zero:/dev/pts:/tmp\" " + ENV_TCP_BIND_NAME "=\"9418\" " + ENV_TCP_CONNECT_NAME "=\"80:443\" " + ENV_SCOPED_NAME "=\"a:s\" " + "%1$s bash -i\n" + "\n" + "This sandboxer can use Landlock features up to ABI version " + STR(LANDLOCK_ABI_LAST) ".\n"; + +/* clang-format on */ + int main(const int argc, char *const argv[], char *const *const envp) { const char *cmd_path; @@ -280,47 +346,7 @@ int main(const int argc, char *const argv[], char *const *const envp) }; if (argc < 2) { - fprintf(stderr, - "usage: %s=\"...\" %s=\"...\" %s=\"...\" %s=\"...\" %s=\"...\" %s " - "<cmd> [args]...\n\n", - ENV_FS_RO_NAME, ENV_FS_RW_NAME, ENV_TCP_BIND_NAME, - ENV_TCP_CONNECT_NAME, ENV_SCOPED_NAME, argv[0]); - fprintf(stderr, - "Execute a command in a restricted environment.\n\n"); - fprintf(stderr, - "Environment variables containing paths and ports " - "each separated by a colon:\n"); - fprintf(stderr, - "* %s: list of paths allowed to be used in a read-only way.\n", - ENV_FS_RO_NAME); - fprintf(stderr, - "* %s: list of paths allowed to be used in a read-write way.\n\n", - ENV_FS_RW_NAME); - fprintf(stderr, - "Environment variables containing ports are optional " - "and could be skipped.\n"); - fprintf(stderr, - "* %s: list of ports allowed to bind 
(server).\n", - ENV_TCP_BIND_NAME); - fprintf(stderr, - "* %s: list of ports allowed to connect (client).\n", - ENV_TCP_CONNECT_NAME); - fprintf(stderr, "* %s: list of scoped IPCs.\n", - ENV_SCOPED_NAME); - fprintf(stderr, - "\nexample:\n" - "%s=\"${PATH}:/lib:/usr:/proc:/etc:/dev/urandom\" " - "%s=\"/dev/null:/dev/full:/dev/zero:/dev/pts:/tmp\" " - "%s=\"9418\" " - "%s=\"80:443\" " - "%s=\"a:s\" " - "%s bash -i\n\n", - ENV_FS_RO_NAME, ENV_FS_RW_NAME, ENV_TCP_BIND_NAME, - ENV_TCP_CONNECT_NAME, ENV_SCOPED_NAME, argv[0]); - fprintf(stderr, - "This sandboxer can use Landlock features " - "up to ABI version %d.\n", - LANDLOCK_ABI_LAST); + fprintf(stderr, help, argv[0]); return 1; } diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh index cdb9f497f87d..66cb707479e6 100755 --- a/samples/pktgen/pktgen_sample01_simple.sh +++ b/samples/pktgen/pktgen_sample01_simple.sh @@ -76,7 +76,7 @@ if [ -n "$DST_PORT" ]; then pg_set $DEV "udp_dst_max $UDP_DST_MAX" fi -[ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM" +[ ! -z "$UDP_CSUM" ] && pg_set $DEV "flag UDPCSUM" # Setup random UDP port src range pg_set $DEV "flag UDPSRC_RND" diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index 6924ed508ebd..377e57e9084f 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -1084,7 +1084,8 @@ static void evm_file_release(struct file *file) if (!S_ISREG(inode->i_mode) || !(mode & FMODE_WRITE)) return; - if (iint && atomic_read(&inode->i_writecount) == 1) + if (iint && iint->flags & EVM_NEW_FILE && + atomic_read(&inode->i_writecount) == 1) iint->flags &= ~EVM_NEW_FILE; } diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c index 4183956c53af..0e627eac9c33 100644 --- a/security/integrity/ima/ima_template_lib.c +++ b/security/integrity/ima/ima_template_lib.c @@ -318,15 +318,21 @@ static int ima_eventdigest_init_common(const u8 *digest, u32 digestsize, hash_algo_name[hash_algo]); } - if (digest) + if (digest) { memcpy(buffer + offset, digest, digestsize); - else + } else { /* * If digest is NULL, the event being recorded is a violation. * Make room for the digest by increasing the offset by the - * hash algorithm digest size. + * hash algorithm digest size. If the hash algorithm is not + * specified increase the offset by IMA_DIGEST_SIZE which + * fits SHA1 or MD5 */ - offset += hash_digest_size[hash_algo]; + if (hash_algo < HASH_ALGO__LAST) + offset += hash_digest_size[hash_algo]; + else + offset += IMA_DIGEST_SIZE; + } return ima_write_template_field_data(buffer, offset + digestsize, fmt, field_data); diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h index 660f76cb69d3..c2c2da691123 100644 --- a/security/integrity/integrity.h +++ b/security/integrity/integrity.h @@ -37,6 +37,8 @@ struct evm_ima_xattr_data { ); u8 data[]; } __packed; +static_assert(offsetof(struct evm_ima_xattr_data, data) == sizeof(struct evm_ima_xattr_data_hdr), + "struct member likely outside of __struct_group()"); /* Only used in the EVM HMAC code. 
*/ struct evm_xattr { @@ -65,6 +67,8 @@ struct ima_digest_data { ); u8 digest[]; } __packed; +static_assert(offsetof(struct ima_digest_data, digest) == sizeof(struct ima_digest_data_hdr), + "struct member likely outside of __struct_group()"); /* * Instead of wrapping the ima_digest_data struct inside a local structure diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 7d79fc8abe21..e31b97a9f175 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -389,37 +389,21 @@ static bool is_nouser_or_private(const struct dentry *dentry) } static access_mask_t -get_raw_handled_fs_accesses(const struct landlock_ruleset *const domain) -{ - access_mask_t access_dom = 0; - size_t layer_level; - - for (layer_level = 0; layer_level < domain->num_layers; layer_level++) - access_dom |= - landlock_get_raw_fs_access_mask(domain, layer_level); - return access_dom; -} - -static access_mask_t get_handled_fs_accesses(const struct landlock_ruleset *const domain) { /* Handles all initially denied by default access rights. */ - return get_raw_handled_fs_accesses(domain) | + return landlock_union_access_masks(domain).fs | LANDLOCK_ACCESS_FS_INITIALLY_DENIED; } -static const struct landlock_ruleset * -get_fs_domain(const struct landlock_ruleset *const domain) -{ - if (!domain || !get_raw_handled_fs_accesses(domain)) - return NULL; - - return domain; -} +static const struct access_masks any_fs = { + .fs = ~0, +}; static const struct landlock_ruleset *get_current_fs_domain(void) { - return get_fs_domain(landlock_get_current_domain()); + return landlock_get_applicable_domain(landlock_get_current_domain(), + any_fs); } /* @@ -1517,7 +1501,8 @@ static int hook_file_open(struct file *const file) access_mask_t open_access_request, full_access_request, allowed_access, optional_access; const struct landlock_ruleset *const dom = - get_fs_domain(landlock_cred(file->f_cred)->domain); + landlock_get_applicable_domain( + landlock_cred(file->f_cred)->domain, any_fs); if (!dom) return 0; diff --git a/security/landlock/net.c b/security/landlock/net.c index c8bcd29bde09..d5dcc4407a19 100644 --- a/security/landlock/net.c +++ b/security/landlock/net.c @@ -39,27 +39,9 @@ int landlock_append_net_rule(struct landlock_ruleset *const ruleset, return err; } -static access_mask_t -get_raw_handled_net_accesses(const struct landlock_ruleset *const domain) -{ - access_mask_t access_dom = 0; - size_t layer_level; - - for (layer_level = 0; layer_level < domain->num_layers; layer_level++) - access_dom |= landlock_get_net_access_mask(domain, layer_level); - return access_dom; -} - -static const struct landlock_ruleset *get_current_net_domain(void) -{ - const struct landlock_ruleset *const dom = - landlock_get_current_domain(); - - if (!dom || !get_raw_handled_net_accesses(dom)) - return NULL; - - return dom; -} +static const struct access_masks any_net = { + .net = ~0, +}; static int current_check_access_socket(struct socket *const sock, struct sockaddr *const address, @@ -72,7 +54,9 @@ static int current_check_access_socket(struct socket *const sock, struct landlock_id id = { .type = LANDLOCK_KEY_NET_PORT, }; - const struct landlock_ruleset *const dom = get_current_net_domain(); + const struct landlock_ruleset *const dom = + landlock_get_applicable_domain(landlock_get_current_domain(), + any_net); if (!dom) return 0; diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h index 61bdbc550172..631e24d4ffe9 100644 --- a/security/landlock/ruleset.h +++ b/security/landlock/ruleset.h @@ -11,6 +11,7 @@ #include 
<linux/bitops.h> #include <linux/build_bug.h> +#include <linux/kernel.h> #include <linux/mutex.h> #include <linux/rbtree.h> #include <linux/refcount.h> @@ -47,6 +48,15 @@ struct access_masks { access_mask_t scope : LANDLOCK_NUM_SCOPE; }; +union access_masks_all { + struct access_masks masks; + u32 all; +}; + +/* Makes sure all fields are covered. */ +static_assert(sizeof(typeof_member(union access_masks_all, masks)) == + sizeof(typeof_member(union access_masks_all, all))); + typedef u16 layer_mask_t; /* Makes sure all layers can be checked. */ static_assert(BITS_PER_TYPE(layer_mask_t) >= LANDLOCK_MAX_NUM_LAYERS); @@ -260,6 +270,61 @@ static inline void landlock_get_ruleset(struct landlock_ruleset *const ruleset) refcount_inc(&ruleset->usage); } +/** + * landlock_union_access_masks - Return all access rights handled in the + * domain + * + * @domain: Landlock ruleset (used as a domain) + * + * Returns: an access_masks result of the OR of all the domain's access masks. + */ +static inline struct access_masks +landlock_union_access_masks(const struct landlock_ruleset *const domain) +{ + union access_masks_all matches = {}; + size_t layer_level; + + for (layer_level = 0; layer_level < domain->num_layers; layer_level++) { + union access_masks_all layer = { + .masks = domain->access_masks[layer_level], + }; + + matches.all |= layer.all; + } + + return matches.masks; +} + +/** + * landlock_get_applicable_domain - Return @domain if it applies to (handles) + * at least one of the access rights specified + * in @masks + * + * @domain: Landlock ruleset (used as a domain) + * @masks: access masks + * + * Returns: @domain if any access rights specified in @masks is handled, or + * NULL otherwise. + */ +static inline const struct landlock_ruleset * +landlock_get_applicable_domain(const struct landlock_ruleset *const domain, + const struct access_masks masks) +{ + const union access_masks_all masks_all = { + .masks = masks, + }; + union access_masks_all merge = {}; + + if (!domain) + return NULL; + + merge.masks = landlock_union_access_masks(domain); + if (merge.all & masks_all.all) + return domain; + + return NULL; +} + static inline void landlock_add_fs_access_mask(struct landlock_ruleset *const ruleset, const access_mask_t fs_access_mask, @@ -296,18 +361,11 @@ landlock_add_scope_mask(struct landlock_ruleset *const ruleset, } static inline access_mask_t -landlock_get_raw_fs_access_mask(const struct landlock_ruleset *const ruleset, - const u16 layer_level) -{ - return ruleset->access_masks[layer_level].fs; -} - -static inline access_mask_t landlock_get_fs_access_mask(const struct landlock_ruleset *const ruleset, const u16 layer_level) { /* Handles all initially denied by default access rights. */ - return landlock_get_raw_fs_access_mask(ruleset, layer_level) | + return ruleset->access_masks[layer_level].fs | LANDLOCK_ACCESS_FS_INITIALLY_DENIED; } diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index f5a0e7182ec0..c097d356fa45 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -329,7 +329,7 @@ static int add_rule_path_beneath(struct landlock_ruleset *const ruleset, return -ENOMSG; /* Checks that allowed_access matches the @ruleset constraints. 
*/ - mask = landlock_get_raw_fs_access_mask(ruleset, 0); + mask = ruleset->access_masks[0].fs; if ((path_beneath_attr.allowed_access | mask) != mask) return -EINVAL; diff --git a/security/landlock/task.c b/security/landlock/task.c index 4acbd7c40eee..dc7dab78392e 100644 --- a/security/landlock/task.c +++ b/security/landlock/task.c @@ -204,12 +204,17 @@ static bool is_abstract_socket(struct sock *const sock) return false; } +static const struct access_masks unix_scope = { + .scope = LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET, +}; + static int hook_unix_stream_connect(struct sock *const sock, struct sock *const other, struct sock *const newsk) { const struct landlock_ruleset *const dom = - landlock_get_current_domain(); + landlock_get_applicable_domain(landlock_get_current_domain(), + unix_scope); /* Quick return for non-landlocked tasks. */ if (!dom) @@ -225,7 +230,8 @@ static int hook_unix_may_send(struct socket *const sock, struct socket *const other) { const struct landlock_ruleset *const dom = - landlock_get_current_domain(); + landlock_get_applicable_domain(landlock_get_current_domain(), + unix_scope); if (!dom) return 0; @@ -243,6 +249,10 @@ static int hook_unix_may_send(struct socket *const sock, return 0; } +static const struct access_masks signal_scope = { + .scope = LANDLOCK_SCOPE_SIGNAL, +}; + static int hook_task_kill(struct task_struct *const p, struct kernel_siginfo *const info, const int sig, const struct cred *const cred) @@ -256,6 +266,7 @@ static int hook_task_kill(struct task_struct *const p, } else { dom = landlock_get_current_domain(); } + dom = landlock_get_applicable_domain(dom, signal_scope); /* Quick return for non-landlocked tasks. */ if (!dom) @@ -279,7 +290,8 @@ static int hook_file_send_sigiotask(struct task_struct *tsk, /* Lock already held by send_sigio() and send_sigurg(). */ lockdep_assert_held(&fown->lock); - dom = landlock_file(fown->file)->fown_domain; + dom = landlock_get_applicable_domain( + landlock_file(fown->file)->fown_domain, signal_scope); /* Quick return for unowned socket. */ if (!dom) diff --git a/sound/core/ump.c b/sound/core/ump.c index cf22a17e38dd..7d59a0a9b037 100644 --- a/sound/core/ump.c +++ b/sound/core/ump.c @@ -1233,7 +1233,7 @@ static int fill_legacy_mapping(struct snd_ump_endpoint *ump) num = 0; for (i = 0; i < SNDRV_UMP_MAX_GROUPS; i++) - if (group_maps & (1U << i)) + if ((group_maps & (1U << i)) && ump->groups[i].valid) ump->legacy_mapping[num++] = i; return num; diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c index 0b42d6559008..079afa4bd381 100644 --- a/sound/firewire/tascam/amdtp-tascam.c +++ b/sound/firewire/tascam/amdtp-tascam.c @@ -238,7 +238,7 @@ int amdtp_tscm_init(struct amdtp_stream *s, struct fw_unit *unit, err = amdtp_stream_init(s, unit, dir, flags, fmt, process_ctx_payloads, sizeof(struct amdtp_tscm)); if (err < 0) - return 0; + return err; if (dir == AMDTP_OUT_STREAM) { // Use fixed value for FDF field. 
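Aside: the Landlock hunks above drop the per-hook get_raw_handled_*_accesses() loops in favour of landlock_union_access_masks() and landlock_get_applicable_domain(), which view the per-layer bitfield masks through a union so they can be OR-folded and overlap-tested as a single u32. The standalone user-space sketch below shows the same pattern; it is illustrative only, and the field widths and mask values are made up, not the kernel's LANDLOCK_* definitions.

/*
 * Minimal model of the access-mask folding used by the Landlock changes
 * above: per-layer bitfield masks are type-punned to a u32 via a union,
 * OR-folded across layers, then overlap-tested against the rights a hook
 * cares about. Field widths are placeholders, not real Landlock constants.
 */
#include <stdint.h>
#include <stdio.h>

struct access_masks {
	uint32_t fs : 16;
	uint32_t net : 2;
	uint32_t scope : 2;
};

union access_masks_all {
	struct access_masks masks;
	uint32_t all;
};

/* OR every layer's masks together (cf. landlock_union_access_masks()). */
static struct access_masks union_masks(const struct access_masks *layers,
				       size_t nr_layers)
{
	union access_masks_all acc;
	size_t i;

	acc.all = 0;
	for (i = 0; i < nr_layers; i++) {
		union access_masks_all layer = { .masks = layers[i] };

		acc.all |= layer.all;
	}
	return acc.masks;
}

/* Return 1 if any of the wanted rights is handled by any layer. */
static int domain_applies(const struct access_masks *layers, size_t nr_layers,
			  struct access_masks wanted)
{
	union access_masks_all have = { .masks = union_masks(layers, nr_layers) };
	union access_masks_all want = { .masks = wanted };

	return (have.all & want.all) != 0;
}

int main(void)
{
	struct access_masks layers[2] = {
		{ .fs = 0x3 },    /* layer 0 handles only filesystem rights */
		{ .scope = 0x2 }, /* layer 1 handles one scope right */
	};
	struct access_masks signal_scope = { .scope = 0x2 };

	printf("signal scope handled: %d\n",
	       domain_applies(layers, 2, signal_scope));
	return 0;
}

The design point is that a hook with nothing to enforce (e.g. no fs, net, or signal rights handled anywhere in the domain) can bail out with a single AND instead of walking every layer per access check.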
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index c74f6742c359..b2bcdf76da30 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -205,8 +205,6 @@ static void cx_auto_shutdown(struct hda_codec *codec) { struct conexant_spec *spec = codec->spec; - snd_hda_gen_shutup_speakers(codec); - /* Turn the problematic codec into D3 to avoid spurious noises from the internal speaker during (and after) reboot */ cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index 438865d5e376..2436e8deb2be 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -231,6 +231,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { .driver_data = &acp6x_card, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21M4"), + } + }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21M5"), } }, @@ -398,6 +405,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { { .driver_data = &acp6x_card, .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "TIMI"), + DMI_MATCH(DMI_PRODUCT_NAME, "Xiaomi Book Pro 14 2022"), + } + }, + { + .driver_data = &acp6x_card, + .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Razer"), DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"), } diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c index ae360c97fe1e..0aeb88abbf52 100644 --- a/sound/soc/codecs/tas2781-fmwlib.c +++ b/sound/soc/codecs/tas2781-fmwlib.c @@ -1992,6 +1992,7 @@ static int tasdevice_dspfw_ready(const struct firmware *fmw, break; case 0x202: case 0x400: + case 0x401: tas_priv->fw_parse_variable_header = fw_parse_variable_header_git; tas_priv->fw_parse_program_data = diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c index de3001f5b9bb..95d4762c9d93 100644 --- a/sound/soc/sof/amd/acp.c +++ b/sound/soc/sof/amd/acp.c @@ -342,11 +342,19 @@ int acp_dma_status(struct acp_dev_data *adata, unsigned char ch) { struct snd_sof_dev *sdev = adata->dev; unsigned int val; + unsigned int acp_dma_ch_sts; int ret = 0; + switch (adata->pci_rev) { + case ACP70_PCI_ID: + acp_dma_ch_sts = ACP70_DMA_CH_STS; + break; + default: + acp_dma_ch_sts = ACP_DMA_CH_STS; + } val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DMA_CNTL_0 + ch * sizeof(u32)); if (val & ACP_DMA_CH_RUN) { - ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, ACP_DMA_CH_STS, val, !val, + ret = snd_sof_dsp_read_poll_timeout(sdev, ACP_DSP_BAR, acp_dma_ch_sts, val, !val, ACP_REG_POLL_INTERVAL, ACP_DMA_COMPLETE_TIMEOUT_US); if (ret < 0) diff --git a/sound/soc/sof/sof-client-probes-ipc4.c b/sound/soc/sof/sof-client-probes-ipc4.c index 796eac0a2e74..603aed222480 100644 --- a/sound/soc/sof/sof-client-probes-ipc4.c +++ b/sound/soc/sof/sof-client-probes-ipc4.c @@ -125,6 +125,7 @@ static int ipc4_probes_init(struct sof_client_dev *cdev, u32 stream_tag, msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_MODULE_MSG); msg.extension = SOF_IPC4_MOD_EXT_DST_MOD_INSTANCE(INVALID_PIPELINE_ID); msg.extension |= SOF_IPC4_MOD_EXT_CORE_ID(0); + msg.extension |= SOF_IPC4_MOD_EXT_PARAM_SIZE(sizeof(cfg) / sizeof(uint32_t)); msg.data_size = sizeof(cfg); msg.data_ptr = &cfg; diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index 7bc4a96b7503..5828f9dd866e 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -317,7 +317,7 @@ static int 
stm32_sai_get_clk_div(struct stm32_sai_sub_data *sai, int div; div = DIV_ROUND_CLOSEST(input_rate, output_rate); - if (div > SAI_XCR1_MCKDIV_MAX(version)) { + if (div > SAI_XCR1_MCKDIV_MAX(version) || div <= 0) { dev_err(&sai->pdev->dev, "Divider %d out of range\n", div); return -EINVAL; } @@ -378,8 +378,8 @@ static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate, int div; div = stm32_sai_get_clk_div(sai, *prate, rate); - if (div < 0) - return div; + if (div <= 0) + return -EINVAL; mclk->freq = *prate / div; diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c index d1b32ba1e1a2..9e30852de93c 100644 --- a/sound/soc/stm/stm32_spdifrx.c +++ b/sound/soc/stm/stm32_spdifrx.c @@ -939,7 +939,7 @@ static void stm32_spdifrx_remove(struct platform_device *pdev) { struct stm32_spdifrx_data *spdifrx = platform_get_drvdata(pdev); - if (spdifrx->ctrl_chan) + if (!IS_ERR(spdifrx->ctrl_chan)) dma_release_channel(spdifrx->ctrl_chan); if (spdifrx->dmab) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 9945ae55b0d0..bd67027c7677 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1205,6 +1205,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, } break; case USB_ID(0x1bcf, 0x2283): /* NexiGo N930AF FHD Webcam */ + case USB_ID(0x03f0, 0x654a): /* HP 320 FHD Webcam */ if (!strcmp(kctl->id.name, "Mic Capture Volume")) { usb_audio_info(chip, "set resolution quirk: cval->res = 16\n"); diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index e6278a245795..c5fd180357d1 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -2114,6 +2114,8 @@ struct usb_audio_quirk_flags_table { static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { /* Device matches */ + DEVICE_FLG(0x03f0, 0x654a, /* HP 320 FHD Webcam */ + QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x041e, 0x3000, /* Creative SB Extigy */ QUIRK_FLAG_IGNORE_CTL_ERROR), DEVICE_FLG(0x041e, 0x4080, /* Creative Live Cam VF0610 */ diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile index 2d0d255fd0e1..8890fd57b110 100644 --- a/tools/lib/thermal/Makefile +++ b/tools/lib/thermal/Makefile @@ -121,7 +121,9 @@ all: fixdep clean: $(call QUIET_CLEAN, libthermal) $(RM) $(LIBTHERMAL_A) \ - *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBTHERMAL_VERSION) .*.d .*.cmd LIBTHERMAL-CFLAGS $(LIBTHERMAL_PC) + *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBTHERMAL_VERSION) \ + .*.d .*.cmd LIBTHERMAL-CFLAGS $(LIBTHERMAL_PC) \ + $(srctree)/tools/$(THERMAL_UAPI) $(LIBTHERMAL_PC): $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \ diff --git a/tools/lib/thermal/sampling.c b/tools/lib/thermal/sampling.c index 70577423a9f0..f67c1f9ea1d7 100644 --- a/tools/lib/thermal/sampling.c +++ b/tools/lib/thermal/sampling.c @@ -16,6 +16,8 @@ static int handle_thermal_sample(struct nl_msg *n, void *arg) struct thermal_handler_param *thp = arg; struct thermal_handler *th = thp->th; + arg = thp->arg; + genlmsg_parse(nlh, 0, attrs, THERMAL_GENL_ATTR_MAX, NULL); switch (genlhdr->cmd) { diff --git a/tools/net/ynl/cli.py b/tools/net/ynl/cli.py index 9e95016b85b3..41d9fa5c818d 100755 --- a/tools/net/ynl/cli.py +++ b/tools/net/ynl/cli.py @@ -3,10 +3,11 @@ import argparse import json +import pathlib import pprint -import time -import signal +import sys +sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix()) from lib import YnlFamily, Netlink, NlError @@ -18,8 +19,6 @@ class YnlEncoder(json.JSONEncoder): return list(obj) return json.JSONEncoder.default(self, obj) -def handle_timeout(sig, frame): - exit(0) 
def main(): description = """ @@ -46,7 +45,10 @@ def main(): group.add_argument('--list-ops', action='store_true') group.add_argument('--list-msgs', action='store_true') - parser.add_argument('--sleep', dest='sleep', type=int) + parser.add_argument('--duration', dest='duration', type=int, + help='when subscribed, watch for DURATION seconds') + parser.add_argument('--sleep', dest='duration', type=int, + help='alias for duration') parser.add_argument('--subscribe', dest='ntf', type=str) parser.add_argument('--replace', dest='flags', action='append_const', const=Netlink.NLM_F_REPLACE) @@ -83,10 +85,6 @@ def main(): if args.ntf: ynl.ntf_subscribe(args.ntf) - if args.sleep: - signal.signal(signal.SIGALRM, handle_timeout) - signal.alarm(args.sleep) - if args.list_ops: for op_name, op in ynl.ops.items(): print(op_name, " [", ", ".join(op.modes), "]") @@ -110,8 +108,11 @@ def main(): exit(1) if args.ntf: - for msg in ynl.check_ntf(): - output(msg) + try: + for msg in ynl.poll_ntf(duration=args.duration): + output(msg) + except KeyboardInterrupt: + pass if __name__ == "__main__": diff --git a/tools/net/ynl/ethtool.py b/tools/net/ynl/ethtool.py index 63c471f075ab..ebb0a11f67bf 100755 --- a/tools/net/ynl/ethtool.py +++ b/tools/net/ynl/ethtool.py @@ -3,11 +3,13 @@ import argparse import json +import pathlib import pprint import sys import re import os +sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix()) from lib import YnlFamily def args_to_req(ynl, op_name, args, req): diff --git a/tools/net/ynl/generated/Makefile b/tools/net/ynl/generated/Makefile index 713f5fb9cc2d..7db5240de58a 100644 --- a/tools/net/ynl/generated/Makefile +++ b/tools/net/ynl/generated/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 CC=gcc -CFLAGS=-std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow \ +CFLAGS += -std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow \ -I../lib/ -idirafter $(UAPI_PATH) ifeq ("$(DEBUG)","1") CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan diff --git a/tools/net/ynl/lib/Makefile b/tools/net/ynl/lib/Makefile index 2887cc5de530..94c49cca3dca 100644 --- a/tools/net/ynl/lib/Makefile +++ b/tools/net/ynl/lib/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 CC=gcc -CFLAGS=-std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow +CFLAGS += -std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow ifeq ("$(DEBUG)","1") CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan endif diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py index 92f85698c50e..01ec01a90e76 100644 --- a/tools/net/ynl/lib/ynl.py +++ b/tools/net/ynl/lib/ynl.py @@ -13,6 +13,7 @@ import yaml import ipaddress import uuid import queue +import selectors import time from .nlspec import SpecFamily @@ -907,37 +908,49 @@ class YnlFamily(SpecFamily): msg['msg'] = attrs self.async_msg_queue.put(msg) - def check_ntf(self, interval=0.1): + def check_ntf(self): while True: try: reply = self.sock.recv(self._recv_size, socket.MSG_DONTWAIT) - nms = NlMsgs(reply) - self._recv_dbg_print(reply, nms) - for nl_msg in nms: - if nl_msg.error: - print("Netlink error in ntf!?", os.strerror(-nl_msg.error)) - print(nl_msg) - continue - if nl_msg.done: - print("Netlink done while checking for ntf!?") - continue + except BlockingIOError: + return - decoded = self.nlproto.decode(self, nl_msg, None) - if decoded.cmd() not in self.async_msg_ids: - print("Unexpected msg id while checking for ntf", decoded) - continue + nms = NlMsgs(reply) + self._recv_dbg_print(reply, 
nms) + for nl_msg in nms: + if nl_msg.error: + print("Netlink error in ntf!?", os.strerror(-nl_msg.error)) + print(nl_msg) + continue + if nl_msg.done: + print("Netlink done while checking for ntf!?") + continue - self.handle_ntf(decoded) - except BlockingIOError: - pass + decoded = self.nlproto.decode(self, nl_msg, None) + if decoded.cmd() not in self.async_msg_ids: + print("Unexpected msg id while checking for ntf", decoded) + continue + + self.handle_ntf(decoded) + def poll_ntf(self, duration=None): + start_time = time.time() + selector = selectors.DefaultSelector() + selector.register(self.sock, selectors.EVENT_READ) + + while True: try: yield self.async_msg_queue.get_nowait() except queue.Empty: - try: - time.sleep(interval) - except KeyboardInterrupt: - return + if duration is not None: + timeout = start_time + duration - time.time() + if timeout <= 0: + return + else: + timeout = None + events = selector.select(timeout) + if events: + self.check_ntf() def operation_do_attributes(self, name): """ diff --git a/tools/net/ynl/samples/Makefile b/tools/net/ynl/samples/Makefile index e194a7565861..c9494a564da4 100644 --- a/tools/net/ynl/samples/Makefile +++ b/tools/net/ynl/samples/Makefile @@ -3,7 +3,7 @@ include ../Makefile.deps CC=gcc -CFLAGS=-std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow \ +CFLAGS += -std=gnu11 -O2 -W -Wall -Wextra -Wno-unused-parameter -Wshadow \ -I../lib/ -I../generated/ -idirafter $(UAPI_PATH) ifeq ("$(DEBUG)","1") CFLAGS += -g -fsanitize=address -fsanitize=leak -static-libasan diff --git a/tools/net/ynl/samples/page-pool.c b/tools/net/ynl/samples/page-pool.c index 332f281ee5cb..e5d521320fbf 100644 --- a/tools/net/ynl/samples/page-pool.c +++ b/tools/net/ynl/samples/page-pool.c @@ -118,7 +118,7 @@ int main(int argc, char **argv) name = if_indextoname(s->ifc, ifname); if (name) printf("%8s", name); - printf("[%d]\t", s->ifc); + printf("[%u]\t", s->ifc); } printf("page pools: %u (zombies: %u)\n", diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py index c48b69071111..394b0023b9a3 100755 --- a/tools/net/ynl/ynl-gen-c.py +++ b/tools/net/ynl/ynl-gen-c.py @@ -4,12 +4,15 @@ import argparse import collections import filecmp +import pathlib import os import re import shutil +import sys import tempfile import yaml +sys.path.append(pathlib.Path(__file__).resolve().parent.as_posix()) from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation, SpecEnumSet, SpecEnumEntry diff --git a/tools/sched_ext/scx_show_state.py b/tools/sched_ext/scx_show_state.py index 8bc626ede1c4..c4b3fdda9a0b 100644 --- a/tools/sched_ext/scx_show_state.py +++ b/tools/sched_ext/scx_show_state.py @@ -35,6 +35,6 @@ print(f'enabled : {read_static_key("__scx_ops_enabled")}') print(f'switching_all : {read_int("scx_switching_all")}') print(f'switched_all : {read_static_key("__scx_switched_all")}') print(f'enable_state : {ops_state_str(enable_state)} ({enable_state})') -print(f'bypass_depth : {read_atomic("scx_ops_bypass_depth")}') +print(f'bypass_depth : {prog["scx_ops_bypass_depth"].value_()}') print(f'nr_rejected : {read_atomic("scx_nr_rejected")}') print(f'enable_seq : {read_atomic("scx_enable_seq")}') diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c index be3cad2aff77..f8eb7f9d4fd2 100644 --- a/tools/testing/selftests/bpf/prog_tests/mptcp.c +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -69,24 +69,6 @@ struct mptcp_storage { char ca_name[TCP_CA_NAME_MAX]; }; -static struct nstoken *create_netns(void) -{ 
- SYS(fail, "ip netns add %s", NS_TEST); - SYS(fail, "ip -net %s link set dev lo up", NS_TEST); - - return open_netns(NS_TEST); -fail: - return NULL; -} - -static void cleanup_netns(struct nstoken *nstoken) -{ - if (nstoken) - close_netns(nstoken); - - SYS_NOFAIL("ip netns del %s", NS_TEST); -} - static int start_mptcp_server(int family, const char *addr_str, __u16 port, int timeout_ms) { @@ -206,15 +188,15 @@ out: static void test_base(void) { - struct nstoken *nstoken = NULL; + struct netns_obj *netns = NULL; int server_fd, cgroup_fd; cgroup_fd = test__join_cgroup("/mptcp"); if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) return; - nstoken = create_netns(); - if (!ASSERT_OK_PTR(nstoken, "create_netns")) + netns = netns_new(NS_TEST, true); + if (!ASSERT_OK_PTR(netns, "netns_new")) goto fail; /* without MPTCP */ @@ -237,7 +219,7 @@ with_mptcp: close(server_fd); fail: - cleanup_netns(nstoken); + netns_free(netns); close(cgroup_fd); } @@ -322,21 +304,21 @@ out: static void test_mptcpify(void) { - struct nstoken *nstoken = NULL; + struct netns_obj *netns = NULL; int cgroup_fd; cgroup_fd = test__join_cgroup("/mptcpify"); if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) return; - nstoken = create_netns(); - if (!ASSERT_OK_PTR(nstoken, "create_netns")) + netns = netns_new(NS_TEST, true); + if (!ASSERT_OK_PTR(netns, "netns_new")) goto fail; ASSERT_OK(run_mptcpify(cgroup_fd), "run_mptcpify"); fail: - cleanup_netns(nstoken); + netns_free(netns); close(cgroup_fd); } @@ -414,7 +396,7 @@ close_server: static void test_subflow(void) { struct mptcp_subflow *skel; - struct nstoken *nstoken; + struct netns_obj *netns; int cgroup_fd; cgroup_fd = test__join_cgroup("/mptcp_subflow"); @@ -437,8 +419,8 @@ static void test_subflow(void) if (!ASSERT_OK_PTR(skel->links._getsockopt_subflow, "attach _getsockopt_subflow")) goto skel_destroy; - nstoken = create_netns(); - if (!ASSERT_OK_PTR(nstoken, "create_netns: mptcp_subflow")) + netns = netns_new(NS_TEST, true); + if (!ASSERT_OK_PTR(netns, "netns_new: mptcp_subflow")) goto skel_destroy; if (endpoint_init("subflow") < 0) @@ -447,7 +429,7 @@ static void test_subflow(void) run_subflow(); close_netns: - cleanup_netns(nstoken); + netns_free(netns); skel_destroy: mptcp_subflow__destroy(skel); close_cgroup: diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c index 156cc278e2fc..7c881bca9af5 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c +++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c @@ -57,9 +57,15 @@ __description("null pointer") __success __retval(0) int null_pointer(void) { - int nr = 0; + struct bpf_iter_bits iter; + int err, nr = 0; int *bit; + err = bpf_iter_bits_new(&iter, NULL, 1); + bpf_iter_bits_destroy(&iter); + if (err != -EINVAL) + return 1; + bpf_for_each(bits, bit, NULL, 1) nr++; return nr; @@ -194,15 +200,33 @@ __description("bad words") __success __retval(0) int bad_words(void) { - void *bad_addr = (void *)(3UL << 30); - int nr = 0; + void *bad_addr = (void *)-4095; + struct bpf_iter_bits iter; + volatile int nr; int *bit; + int err; + + err = bpf_iter_bits_new(&iter, bad_addr, 1); + bpf_iter_bits_destroy(&iter); + if (err != -EFAULT) + return 1; + nr = 0; bpf_for_each(bits, bit, bad_addr, 1) nr++; + if (nr != 0) + return 2; + err = bpf_iter_bits_new(&iter, bad_addr, 4); + bpf_iter_bits_destroy(&iter); + if (err != -EFAULT) + return 3; + + nr = 0; bpf_for_each(bits, bit, bad_addr, 4) nr++; + if (nr != 0) + return 4; - return nr; + return 0; } 
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 075c93ed143e..e5c7ecbe57e3 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -88,6 +88,10 @@ int ktls; int peek_flag; int skb_use_parser; int txmsg_omit_skb_parser; +int verify_push_start; +int verify_push_len; +int verify_pop_start; +int verify_pop_len; static const struct option long_options[] = { {"help", no_argument, NULL, 'h' }, @@ -420,16 +424,18 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, { bool drop = opt->drop_expected; unsigned char k = 0; + int i, j, fp; FILE *file; - int i, fp; file = tmpfile(); if (!file) { perror("create file for sendpage"); return 1; } - for (i = 0; i < iov_length * cnt; i++, k++) - fwrite(&k, sizeof(char), 1, file); + for (i = 0; i < cnt; i++, k = 0) { + for (j = 0; j < iov_length; j++, k++) + fwrite(&k, sizeof(char), 1, file); + } fflush(file); fseek(file, 0, SEEK_SET); @@ -512,12 +518,41 @@ unwind_iov: return -ENOMEM; } -/* TODO: Add verification logic for push, pull and pop data */ +/* In push or pop test, we need to do some calculations for msg_verify_data */ +static void msg_verify_date_prep(void) +{ + int push_range_end = txmsg_start_push + txmsg_end_push - 1; + int pop_range_end = txmsg_start_pop + txmsg_pop - 1; + + if (txmsg_end_push && txmsg_pop && + txmsg_start_push <= pop_range_end && txmsg_start_pop <= push_range_end) { + /* The push range and the pop range overlap */ + int overlap_len; + + verify_push_start = txmsg_start_push; + verify_pop_start = txmsg_start_pop; + if (txmsg_start_push < txmsg_start_pop) + overlap_len = min(push_range_end - txmsg_start_pop + 1, txmsg_pop); + else + overlap_len = min(pop_range_end - txmsg_start_push + 1, txmsg_end_push); + verify_push_len = max(txmsg_end_push - overlap_len, 0); + verify_pop_len = max(txmsg_pop - overlap_len, 0); + } else { + /* Otherwise */ + verify_push_start = txmsg_start_push; + verify_pop_start = txmsg_start_pop; + verify_push_len = txmsg_end_push; + verify_pop_len = txmsg_pop; + } +} + static int msg_verify_data(struct msghdr *msg, int size, int chunk_sz, - unsigned char *k_p, int *bytes_cnt_p) + unsigned char *k_p, int *bytes_cnt_p, + int *check_cnt_p, int *push_p) { - int i, j, bytes_cnt = *bytes_cnt_p; + int bytes_cnt = *bytes_cnt_p, check_cnt = *check_cnt_p, push = *push_p; unsigned char k = *k_p; + int i, j; for (i = 0, j = 0; i < msg->msg_iovlen && size; i++, j = 0) { unsigned char *d = msg->msg_iov[i].iov_base; @@ -536,6 +571,37 @@ static int msg_verify_data(struct msghdr *msg, int size, int chunk_sz, } for (; j < msg->msg_iov[i].iov_len && size; j++) { + if (push > 0 && + check_cnt == verify_push_start + verify_push_len - push) { + int skipped; +revisit_push: + skipped = push; + if (j + push >= msg->msg_iov[i].iov_len) + skipped = msg->msg_iov[i].iov_len - j; + push -= skipped; + size -= skipped; + j += skipped - 1; + check_cnt += skipped; + continue; + } + + if (verify_pop_len > 0 && check_cnt == verify_pop_start) { + bytes_cnt += verify_pop_len; + check_cnt += verify_pop_len; + k += verify_pop_len; + + if (bytes_cnt == chunk_sz) { + k = 0; + bytes_cnt = 0; + check_cnt = 0; + push = verify_push_len; + } + + if (push > 0 && + check_cnt == verify_push_start + verify_push_len - push) + goto revisit_push; + } + if (d[j] != k++) { fprintf(stderr, "detected data corruption @iov[%i]:%i %02x != %02x, %02x ?= %02x\n", @@ -543,15 +609,20 @@ static int msg_verify_data(struct msghdr *msg, int size, int 
chunk_sz, return -EDATAINTEGRITY; } bytes_cnt++; + check_cnt++; if (bytes_cnt == chunk_sz) { k = 0; bytes_cnt = 0; + check_cnt = 0; + push = verify_push_len; } size--; } } *k_p = k; *bytes_cnt_p = bytes_cnt; + *check_cnt_p = check_cnt; + *push_p = push; return 0; } @@ -604,12 +675,14 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, } clock_gettime(CLOCK_MONOTONIC, &s->end); } else { + float total_bytes, txmsg_pop_total, txmsg_push_total; int slct, recvp = 0, recv, max_fd = fd; - float total_bytes, txmsg_pop_total; int fd_flags = O_NONBLOCK; struct timeval timeout; unsigned char k = 0; int bytes_cnt = 0; + int check_cnt = 0; + int push = 0; fd_set w; fcntl(fd, fd_flags); @@ -623,12 +696,22 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, * This is really only useful for testing edge cases in code * paths. */ - total_bytes = (float)iov_count * (float)iov_length * (float)cnt; - if (txmsg_apply) + total_bytes = (float)iov_length * (float)cnt; + if (!opt->sendpage) + total_bytes *= (float)iov_count; + if (txmsg_apply) { + txmsg_push_total = txmsg_end_push * (total_bytes / txmsg_apply); txmsg_pop_total = txmsg_pop * (total_bytes / txmsg_apply); - else + } else { + txmsg_push_total = txmsg_end_push * cnt; txmsg_pop_total = txmsg_pop * cnt; + } + total_bytes += txmsg_push_total; total_bytes -= txmsg_pop_total; + if (data) { + msg_verify_date_prep(); + push = verify_push_len; + } err = clock_gettime(CLOCK_MONOTONIC, &s->start); if (err < 0) perror("recv start time"); @@ -701,10 +784,11 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, if (data) { int chunk_sz = opt->sendpage ? - iov_length * cnt : + iov_length : iov_length * iov_count; - errno = msg_verify_data(&msg, recv, chunk_sz, &k, &bytes_cnt); + errno = msg_verify_data(&msg, recv, chunk_sz, &k, &bytes_cnt, + &check_cnt, &push); if (errno) { perror("data verify msg failed"); goto out_errno; @@ -714,7 +798,9 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, recvp, chunk_sz, &k, - &bytes_cnt); + &bytes_cnt, + &check_cnt, + &push); if (errno) { perror("data verify msg_peek failed"); goto out_errno; @@ -796,8 +882,6 @@ static int sendmsg_test(struct sockmap_options *opt) rxpid = fork(); if (rxpid == 0) { - if (txmsg_pop || txmsg_start_pop) - iov_buf -= (txmsg_pop - txmsg_start_pop + 1); if (opt->drop_expected || txmsg_ktls_skb_drop) _exit(0); @@ -1466,8 +1550,8 @@ static void test_send_many(struct sockmap_options *opt, int cgrp) static void test_send_large(struct sockmap_options *opt, int cgrp) { - opt->iov_length = 256; - opt->iov_count = 1024; + opt->iov_length = 8192; + opt->iov_count = 32; opt->rate = 2; test_exec(cgrp, opt); } @@ -1596,11 +1680,13 @@ static void test_txmsg_cork_hangs(int cgrp, struct sockmap_options *opt) static void test_txmsg_pull(int cgrp, struct sockmap_options *opt) { /* Test basic start/end */ + txmsg_pass = 1; txmsg_start = 1; txmsg_end = 2; test_send(opt, cgrp); /* Test >4k pull */ + txmsg_pass = 1; txmsg_start = 4096; txmsg_end = 9182; test_send_large(opt, cgrp); @@ -1628,12 +1714,16 @@ static void test_txmsg_pull(int cgrp, struct sockmap_options *opt) static void test_txmsg_pop(int cgrp, struct sockmap_options *opt) { + bool data = opt->data_test; + /* Test basic pop */ + txmsg_pass = 1; txmsg_start_pop = 1; txmsg_pop = 2; test_send_many(opt, cgrp); /* Test pop with >4k */ + txmsg_pass = 1; txmsg_start_pop = 4096; txmsg_pop = 4096; test_send_large(opt, cgrp); @@ -1644,6 +1734,12 @@ static void test_txmsg_pop(int cgrp, struct 
sockmap_options *opt) txmsg_pop = 2; test_send_many(opt, cgrp); + /* TODO: Test for pop + cork should be different, + * - It makes the layout of the received data difficult + * - It makes it hard to calculate the total_bytes in the recvmsg + * Temporarily skip the data integrity test for this case now. + */ + opt->data_test = false; /* Test pop + cork */ txmsg_redir = 0; txmsg_cork = 512; @@ -1657,16 +1753,21 @@ static void test_txmsg_pop(int cgrp, struct sockmap_options *opt) txmsg_start_pop = 1; txmsg_pop = 2; test_send_many(opt, cgrp); + opt->data_test = data; } static void test_txmsg_push(int cgrp, struct sockmap_options *opt) { + bool data = opt->data_test; + /* Test basic push */ + txmsg_pass = 1; txmsg_start_push = 1; txmsg_end_push = 1; test_send(opt, cgrp); /* Test push 4kB >4k */ + txmsg_pass = 1; txmsg_start_push = 4096; txmsg_end_push = 4096; test_send_large(opt, cgrp); @@ -1677,19 +1778,64 @@ static void test_txmsg_push(int cgrp, struct sockmap_options *opt) txmsg_end_push = 2; test_send_many(opt, cgrp); + /* TODO: Test for push + cork should be different, + * - It makes the layout of the received data difficult + * - It makes it hard to calculate the total_bytes in the recvmsg + * Temporarily skip the data integrity test for this case now. + */ + opt->data_test = false; /* Test push + cork */ txmsg_redir = 0; txmsg_cork = 512; txmsg_start_push = 1; txmsg_end_push = 2; test_send_many(opt, cgrp); + opt->data_test = data; } static void test_txmsg_push_pop(int cgrp, struct sockmap_options *opt) { + /* Test push/pop range overlapping */ + txmsg_pass = 1; + txmsg_start_push = 1; + txmsg_end_push = 10; + txmsg_start_pop = 5; + txmsg_pop = 4; + test_send_large(opt, cgrp); + + txmsg_pass = 1; txmsg_start_push = 1; txmsg_end_push = 10; txmsg_start_pop = 5; + txmsg_pop = 16; + test_send_large(opt, cgrp); + + txmsg_pass = 1; + txmsg_start_push = 5; + txmsg_end_push = 4; + txmsg_start_pop = 1; + txmsg_pop = 10; + test_send_large(opt, cgrp); + + txmsg_pass = 1; + txmsg_start_push = 5; + txmsg_end_push = 16; + txmsg_start_pop = 1; + txmsg_pop = 10; + test_send_large(opt, cgrp); + + /* Test push/pop range non-overlapping */ + txmsg_pass = 1; + txmsg_start_push = 1; + txmsg_end_push = 10; + txmsg_start_pop = 16; + txmsg_pop = 4; + test_send_large(opt, cgrp); + + txmsg_pass = 1; + txmsg_start_push = 16; + txmsg_end_push = 10; + txmsg_start_pop = 5; txmsg_pop = 4; test_send_large(opt, cgrp); } diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh index 41d0859feb7d..edc56e2cc606 100755 --- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh @@ -11,6 +11,8 @@ ALL_TESTS=" lib_dir=$(dirname "$0") source ${lib_dir}/bond_topo_3d1c.sh +c_maddr="33:33:00:00:00:10" +g_maddr="33:33:00:00:02:54" skip_prio() { @@ -240,6 +242,54 @@ arp_validate_test() done } +# Testing correct multicast groups are added to slaves for ns targets +arp_validate_mcast() +{ + RET=0 + local arp_valid=$(cmd_jq "ip -n ${s_ns} -j -d link show bond0" ".[].linkinfo.info_data.arp_validate") + local active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave") + + for i in $(seq 0 2); do + maddr_list=$(ip -n ${s_ns} maddr show dev eth${i}) + + # arp_valid == 0 or active_slave should not join any maddrs + if { [ "$arp_valid" == "null" ] || [ "eth${i}" == ${active_slave} ]; } && \ + echo "$maddr_list" | grep -qE "${c_maddr}|${g_maddr}"; then + 
RET=1 + check_err 1 "arp_valid $arp_valid active_slave $active_slave, eth$i has mcast group" + # arp_valid != 0 and backup_slave should join both maddrs + elif [ "$arp_valid" != "null" ] && [ "eth${i}" != ${active_slave} ] && \ + ( ! echo "$maddr_list" | grep -q "${c_maddr}" || \ + ! echo "$maddr_list" | grep -q "${m_maddr}"); then + RET=1 + check_err 1 "arp_valid $arp_valid active_slave $active_slave, eth$i has mcast group" + fi + done + + # Do failover + ip -n ${s_ns} link set ${active_slave} down + # wait for active link change + slowwait 2 active_slave_changed $active_slave + active_slave=$(cmd_jq "ip -n ${s_ns} -d -j link show bond0" ".[].linkinfo.info_data.active_slave") + + for i in $(seq 0 2); do + maddr_list=$(ip -n ${s_ns} maddr show dev eth${i}) + + # arp_valid == 0 or active_slave should not join any maddrs + if { [ "$arp_valid" == "null" ] || [ "eth${i}" == ${active_slave} ]; } && \ + echo "$maddr_list" | grep -qE "${c_maddr}|${g_maddr}"; then + RET=1 + check_err 1 "arp_valid $arp_valid active_slave $active_slave, eth$i has mcast group" + # arp_valid != 0 and backup_slave should join both maddrs + elif [ "$arp_valid" != "null" ] && [ "eth${i}" != ${active_slave} ] && \ + ( ! echo "$maddr_list" | grep -q "${c_maddr}" || \ + ! echo "$maddr_list" | grep -q "${m_maddr}"); then + RET=1 + check_err 1 "arp_valid $arp_valid active_slave $active_slave, eth$i has mcast group" + fi + done +} + arp_validate_arp() { local mode=$1 @@ -261,8 +311,10 @@ arp_validate_ns() fi for val in $(seq 0 6); do - arp_validate_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} arp_validate $val" + arp_validate_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6},${c_ip6} arp_validate $val" log_test "arp_validate" "$mode ns_ip6_target arp_validate $val" + arp_validate_mcast + log_test "arp_validate" "join mcast group" done } diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py index 29995586993c..0b49ce7ae678 100755 --- a/tools/testing/selftests/drivers/net/hw/rss_ctx.py +++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py @@ -215,7 +215,7 @@ def test_rss_queue_reconfigure(cfg, main_ctx=True): defer(ethtool, f"-X {cfg.ifname} default") else: other_key = 'noise' - flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}" + flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id}" ntuple = ethtool_create(cfg, "-N", flow) defer(ethtool, f"-N {cfg.ifname} delete {ntuple}") @@ -238,6 +238,32 @@ def test_rss_queue_reconfigure(cfg, main_ctx=True): else: raise Exception(f"Driver didn't prevent us from deactivating a used queue (context {ctx_id})") + if not main_ctx: + ethtool(f"-L {cfg.ifname} combined 4") + flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id} action 1" + try: + # this targets queue 4, which doesn't exist + ntuple2 = ethtool_create(cfg, "-N", flow) + except CmdExitFailure: + pass + else: + raise Exception(f"Driver didn't prevent us from targeting a nonexistent queue (context {ctx_id})") + # change the table to target queues 0 and 2 + ethtool(f"-X {cfg.ifname} {ctx_ref} weight 1 0 1 0") + # ntuple rule therefore targets queues 1 and 3 + ntuple2 = ethtool_create(cfg, "-N", flow) + # should replace existing filter + ksft_eq(ntuple, ntuple2) + _send_traffic_check(cfg, port, ctx_ref, { 'target': (1, 3), + 'noise' : (0, 2) }) + # Setting queue count to 3 should fail, queue 3 is used + try: + ethtool(f"-L {cfg.ifname} combined 3") + except 
CmdExitFailure:
+            pass
+        else:
+            raise Exception(f"Driver didn't prevent us from deactivating a used queue (context {ctx_id})")
+

 def test_rss_resize(cfg):
     """Test resizing of the RSS table.
@@ -429,7 +455,7 @@ def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None):
             ksft_eq(max(data['rss-indirection-table']), 2 + i * 2 + 1,
                     "Unexpected context cfg: " + str(data))

         ports.append(rand_port())
-        flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
+        flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {ports[i]} context {ctx_id}"
         ntuple = ethtool_create(cfg, "-N", flow)
         defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")
@@ -516,7 +542,7 @@ def test_rss_context_out_of_order(cfg, ctx_cnt=4):
         ctx.append(defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete"))

         ports.append(rand_port())
-        flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
+        flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {ports[i]} context {ctx_id}"
         ntuple_id = ethtool_create(cfg, "-N", flow)
         ntuple.append(defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}"))

@@ -569,7 +595,7 @@ def test_rss_context_overlap(cfg, other_ctx=0):

     port = rand_port()
     if other_ctx:
-        flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {other_ctx}"
+        flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {other_ctx}"
         ntuple_id = ethtool_create(cfg, "-N", flow)
         ntuple = defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")

@@ -587,7 +613,7 @@ def test_rss_context_overlap(cfg, other_ctx=0):
     # Now create a rule for context 1 and make sure traffic goes to a subset
     if other_ctx:
         ntuple.exec()
-    flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
+    flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id}"
     ntuple_id = ethtool_create(cfg, "-N", flow)
     defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")

@@ -620,7 +646,7 @@ def test_delete_rss_context_busy(cfg):

     # utilize context from ntuple filter
     port = rand_port()
-    flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}"
+    flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id}"
     ntuple_id = ethtool_create(cfg, "-N", flow)
     defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")

@@ -633,6 +659,45 @@ def test_delete_rss_context_busy(cfg):
         pass


+def test_rss_ntuple_addition(cfg):
+    """
+    Test that the queue offset (ring_cookie) of an ntuple rule is added
+    to the queue number read from the indirection table.
+    """
+
+    require_ntuple(cfg)
+
+    queue_cnt = len(_get_rx_cnts(cfg))
+    if queue_cnt < 4:
+        try:
+            ksft_pr(f"Increasing queue count {queue_cnt} -> 4")
+            ethtool(f"-L {cfg.ifname} combined 4")
+            defer(ethtool, f"-L {cfg.ifname} combined {queue_cnt}")
+        except:
+            raise KsftSkipEx("Not enough queues for the test")
+
+    # Use queue 0 for normal traffic
+    ethtool(f"-X {cfg.ifname} equal 1")
+    defer(ethtool, f"-X {cfg.ifname} default")
+
+    # create additional rss context
+    ctx_id = ethtool_create(cfg, "-X", "context new equal 2")
+    defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+
+    # utilize context from ntuple filter
+    port = rand_port()
+    flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port {port} context {ctx_id} action 2"
+    try:
+        ntuple_id = ethtool_create(cfg, "-N", flow)
+    except CmdExitFailure:
+        raise KsftSkipEx("Ntuple filter with RSS and nonzero action not supported")
+    defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")
+
+    _send_traffic_check(cfg, port, f"context {ctx_id}", { 'target': (2, 3),
+                                                          'empty' : (1,),
+                                                          'noise' : (0,) })
+
+
 def main() -> None:
     with NetDrvEpEnv(__file__, nsim_test=False) as cfg:
         cfg.ethnl = EthtoolFamily()
@@ -644,7 +709,7 @@ def main() -> None:
                   test_rss_context_dump, test_rss_context_queue_reconfigure,
                   test_rss_context_overlap, test_rss_context_overlap2,
                   test_rss_context_out_of_order, test_rss_context4_create_with_cfg,
-                  test_delete_rss_context_busy],
+                  test_delete_rss_context_busy, test_rss_ntuple_addition],
                  args=(cfg, ))
     ksft_exit()

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 156fbfae940f..48645a2e29da 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -241,16 +241,18 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
	-Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \
	-fno-builtin-memcmp -fno-builtin-memcpy \
	-fno-builtin-memset -fno-builtin-strnlen \
-	-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
-	-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
-	-I$(<D) -Iinclude/$(ARCH_DIR) -I ../rseq -I.. $(EXTRA_CFLAGS) \
-	$(KHDR_INCLUDES)
+	-fno-stack-protector -fno-PIE -fno-strict-aliasing \
+	-I$(LINUX_TOOL_INCLUDE) -I$(LINUX_TOOL_ARCH_INCLUDE) \
+	-I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(ARCH_DIR) \
+	-I ../rseq -I.. $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
 ifeq ($(ARCH),s390)
	CFLAGS += -march=z10
 endif
 ifeq ($(ARCH),x86)
+ifeq ($(shell echo "void foo(void) { }" | $(CC) -march=x86-64-v2 -x c - -c -o /dev/null 2>/dev/null; echo "$$?"),0)
	CFLAGS += -march=x86-64-v2
 endif
+endif
 ifeq ($(ARCH),arm64)
	tools_dir := $(top_srcdir)/tools
	arm64_tools_dir := $(tools_dir)/arch/arm64/tools/
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index ba0c8e996035..ce687f8d248f 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -134,7 +134,7 @@ static void test_create_guest_memfd_invalid(struct kvm_vm *vm)
			    size);
	}

-	for (flag = 0; flag; flag <<= 1) {
+	for (flag = BIT(0); flag; flag <<= 1) {
		fd = __vm_create_guest_memfd(vm, page_size, flag);
		TEST_ASSERT(fd == -1 && errno == EINVAL,
			    "guest_memfd() with flag '0x%lx' should fail with EINVAL",
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 089b8925b6b2..d7ac122820bf 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -200,7 +200,7 @@ static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
	if (vmx->eptp_gpa) {
		uint64_t ept_paddr;
		struct eptPageTablePointer eptp = {
-			.memory_type = VMX_BASIC_MEM_TYPE_WB,
+			.memory_type = X86_MEMTYPE_WB,
			.page_walk_length = 3, /* + 1 */
			.ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS),
			.address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 989ffe0d047f..e3711beff7f3 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -417,7 +417,7 @@ static bool _guest_should_exit(void)
  */
 static noinline void host_perform_sync(struct sync_area *sync)
 {
-	alarm(2);
+	alarm(10);

	atomic_store_explicit(&sync->sync_flag, true, memory_order_release);
	while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire))
diff --git a/tools/testing/selftests/mm/hugetlb_dio.c b/tools/testing/selftests/mm/hugetlb_dio.c
index f9ac20c657ec..432d5af15e66 100644
--- a/tools/testing/selftests/mm/hugetlb_dio.c
+++ b/tools/testing/selftests/mm/hugetlb_dio.c
@@ -94,8 +94,20 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
 int main(void)
 {
	size_t pagesize = 0;
+	int fd;

	ksft_print_header();
+
+	/* Open the file to DIO */
+	fd = open("/tmp", O_TMPFILE | O_RDWR | O_DIRECT, 0664);
+	if (fd < 0)
+		ksft_exit_skip("Unable to allocate file: %s\n", strerror(errno));
+	close(fd);
+
+	/* Check if huge pages are free */
+	if (!get_free_hugepages())
+		ksft_exit_skip("No free hugepage, exiting\n");
+
	ksft_set_plan(4);

	/* Get base page size */
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 48973e78d46b..28a715a8ef2b 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -19,6 +19,7 @@ ipv6_flowlabel_mgr
 log.txt
 msg_oob
 msg_zerocopy
+netlink-dumps
 nettest
 psock_fanout
 psock_snd
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 2b2a5ec7fa6a..d323898c466c 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -78,6 +78,7 @@ TEST_PROGS += test_vxlan_vnifiltering.sh
 TEST_GEN_FILES += io_uring_zerocopy_tx
 TEST_PROGS += io_uring_zerocopy_tx.sh
 TEST_GEN_FILES += bind_bhash
+TEST_GEN_PROGS += netlink-dumps
 TEST_GEN_PROGS += sk_bind_sendto_listen
 TEST_GEN_PROGS += sk_connect_zero_addr
 TEST_GEN_PROGS += sk_so_peek_off
diff --git a/tools/testing/selftests/net/netlink-dumps.c b/tools/testing/selftests/net/netlink-dumps.c
new file mode 100644
index 000000000000..7ee6dcd334df
--- /dev/null
+++ b/tools/testing/selftests/net/netlink-dumps.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <linux/genetlink.h>
+#include <linux/netlink.h>
+#include <linux/mqueue.h>
+
+#include "../kselftest_harness.h"
+
+static const struct {
+	struct nlmsghdr nlhdr;
+	struct genlmsghdr genlhdr;
+	struct nlattr ahdr;
+	__u16 val;
+	__u16 pad;
+} dump_policies = {
+	.nlhdr = {
+		.nlmsg_len = sizeof(dump_policies),
+		.nlmsg_type = GENL_ID_CTRL,
+		.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP,
+		.nlmsg_seq = 1,
+	},
+	.genlhdr = {
+		.cmd = CTRL_CMD_GETPOLICY,
+		.version = 2,
+	},
+	.ahdr = {
+		.nla_len = 6,
+		.nla_type = CTRL_ATTR_FAMILY_ID,
+	},
+	.val = GENL_ID_CTRL,
+	.pad = 0,
+};
+
+// Sanity check for the test itself, make sure the dump doesn't fit in one msg
+TEST(test_sanity)
+{
+	int netlink_sock;
+	char buf[8192];
+	ssize_t n;
+
+	netlink_sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+	ASSERT_GE(netlink_sock, 0);
+
+	n = send(netlink_sock, &dump_policies, sizeof(dump_policies), 0);
+	ASSERT_EQ(n, sizeof(dump_policies));
+
+	n = recv(netlink_sock, buf, sizeof(buf), MSG_DONTWAIT);
+	ASSERT_GE(n, sizeof(struct nlmsghdr));
+
+	n = recv(netlink_sock, buf, sizeof(buf), MSG_DONTWAIT);
+	ASSERT_GE(n, sizeof(struct nlmsghdr));
+
+	close(netlink_sock);
+}
+
+TEST(close_in_progress)
+{
+	int netlink_sock;
+	ssize_t n;
+
+	netlink_sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+	ASSERT_GE(netlink_sock, 0);
+
+	n = send(netlink_sock, &dump_policies, sizeof(dump_policies), 0);
+	ASSERT_EQ(n, sizeof(dump_policies));
+
+	close(netlink_sock);
+}
+
+TEST(close_with_ref)
+{
+	char cookie[NOTIFY_COOKIE_LEN] = {};
+	int netlink_sock, mq_fd;
+	struct sigevent sigev;
+	ssize_t n;
+
+	netlink_sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+	ASSERT_GE(netlink_sock, 0);
+
+	n = send(netlink_sock, &dump_policies, sizeof(dump_policies), 0);
+	ASSERT_EQ(n, sizeof(dump_policies));
+
+	mq_fd = syscall(__NR_mq_open, "sed", O_CREAT | O_WRONLY, 0600, 0);
+	ASSERT_GE(mq_fd, 0);
+
+	memset(&sigev, 0, sizeof(sigev));
+	sigev.sigev_notify = SIGEV_THREAD;
+	sigev.sigev_value.sival_ptr = cookie;
+	sigev.sigev_signo = netlink_sock;
+
+	syscall(__NR_mq_notify, mq_fd, &sigev);
+
+	close(netlink_sock);
+
+	// give mqueue time to fire
+	usleep(100 * 1000);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json b/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
index 24bd0c2a3014..b2ca9d4e991b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/u32.json
@@ -329,5 +329,29 @@
         "teardown": [
             "$TC qdisc del dev $DEV1 parent root drr"
         ]
+    },
+    {
+        "id": "1234",
+        "name": "Exercise IDR leaks by creating/deleting a filter many (2048) times",
+        "category": [
+            "filter",
+            "u32"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DEV1 parent root handle 10: drr",
+            "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 2 u32 match ip src 0.0.0.2/32 action drop",
+            "$TC filter add dev $DEV1 parent 10:0 protocol ip prio 3 u32 match ip src 0.0.0.3/32 action drop"
+        ],
+        "cmdUnderTest": "bash -c 'for i in {1..2048} ;do echo filter delete dev $DEV1 pref 3;echo filter add dev $DEV1 parent 10:0 protocol ip prio 3 u32 match ip src 0.0.0.3/32 action drop;done | $TC -b -'",
+        "expExitCode": "0",
+        "verifyCmd": "$TC filter show dev $DEV1",
+        "matchPattern": "protocol ip pref 3 u32",
+        "matchCount": "3",
+        "teardown": [
+            "$TC qdisc del dev $DEV1 parent root drr"
+        ]
     }
 ]
diff --git a/tools/virtio/vringh_test.c b/tools/virtio/vringh_test.c
index 43d3a6aa1dcf..b9591223437a 100644
--- a/tools/virtio/vringh_test.c
+++ b/tools/virtio/vringh_test.c
@@ -519,7 +519,7 @@ int main(int argc, char *argv[])
		errx(1, "virtqueue_add_sgs: %i", err);
	__kmalloc_fake = NULL;

-	/* Host retreives it. */
+	/* Host retrieves it. */
	vringh_iov_init(&riov, host_riov, ARRAY_SIZE(host_riov));
	vringh_iov_init(&wiov, host_wiov, ARRAY_SIZE(host_wiov));